1 """Python wrappers around TensorFlow ops.
   2 
   3 This file is MACHINE GENERATED! Do not edit.
   4 Original C++ source file: math_ops.cc
   5 """
   6 
   7 import collections as _collections
   8 import six as _six
   9 
  10 from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
  11 from tensorflow.python.eager import context as _context
  12 from tensorflow.python.eager import core as _core
  13 from tensorflow.python.eager import execute as _execute
  14 from tensorflow.python.framework import dtypes as _dtypes
  15 from tensorflow.python.framework import errors as _errors
  16 from tensorflow.python.framework import tensor_shape as _tensor_shape
  17 
  18 from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
  19 # Needed to trigger the call to _set_call_cpp_shape_fn.
  20 from tensorflow.python.framework import common_shapes as _common_shapes
  21 from tensorflow.python.framework import op_def_registry as _op_def_registry
  22 from tensorflow.python.framework import ops as _ops
  23 from tensorflow.python.framework import op_def_library as _op_def_library
  24 from tensorflow.python.util.deprecation import deprecated_endpoints
  25 from tensorflow.python.util.tf_export import tf_export
  26 
  27 
def _abs(x, name=None):
  r"""Computes the absolute value of a tensor.

  Given a tensor `x`, this operation returns a tensor containing the absolute
  value of each element in `x`. For example, if x is an input element and y is
  an output element, this operation computes \\(y = |x|\\).

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build an "Abs" node through the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Abs", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Abs", _inputs_flat, _attrs, _result, name)
    # The op has exactly one output; unwrap it from the list.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: attempt the C fast path first (positional protocol:
      # handle, device, op name, custom name, callbacks, then inputs).
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Abs", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the generic slow path.
      return _abs_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into the matching Python exception,
      # appending the op name to aid debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
  69 
  70 
  71 def _abs_eager_fallback(x, name=None, ctx=None):
  72   r"""This is the slowpath function for Eager mode.
  73   This is for function _abs
  74   """
  75   _ctx = ctx if ctx else _context.context()
  76   _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  77   _inputs_flat = [x]
  78   _attrs = ("T", _attr_T)
  79   _result = _execute.execute(b"Abs", 1, inputs=_inputs_flat, attrs=_attrs,
  80                              ctx=_ctx, name=name)
  81   _execute.record_gradient(
  82       "Abs", _inputs_flat, _attrs, _result, name)
  83   _result, = _result
  84   return _result
  85 
  86 
def accumulate_nv2(inputs, shape, name=None):
  r"""Returns the element-wise sum of a list of tensors.

  `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not
  wait for all of its inputs to be ready before beginning to sum. This can
  save memory if inputs are ready at different times, since minimum temporary
  storage is proportional to the output size rather than the inputs size.

  Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable.

  Returns a `Tensor` of same shape and type as the elements of `inputs`.

  Args:
    inputs: A list of at least 1 `Tensor` objects with the same type in: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      A list of `Tensor` objects, each with same shape and type.
    shape: A `tf.TensorShape` or list of `ints`.
      Shape of elements of `inputs`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `inputs`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: the "N" attr is derived from the list length, so the
    # argument must really be a list/tuple.
    if not isinstance(inputs, (list, tuple)):
      raise TypeError(
          "Expected list for 'inputs' argument to "
          "'accumulate_nv2' Op, not %r." % inputs)
    _attr_N = len(inputs)
    # Canonicalize `shape` into a TensorShape proto-compatible value.
    shape = _execute.make_shape(shape, "shape")
    _, _, _op = _op_def_lib._apply_op_helper(
        "AccumulateNV2", inputs=inputs, shape=shape, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"), "shape",
              _op.get_attr("shape"))
    _execute.record_gradient(
      "AccumulateNV2", _inputs_flat, _attrs, _result, name)
    # Single-output op; unwrap it from the list.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: C fast path; attrs are passed as trailing
      # name/value pairs after the inputs.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "AccumulateNV2", name, _ctx._post_execution_callbacks, inputs,
        "shape", shape)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the generic slow path.
      return accumulate_nv2_eager_fallback(
          inputs, shape=shape, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into the matching Python exception,
      # appending the op name to aid debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 144 
 145 
 146 def accumulate_nv2_eager_fallback(inputs, shape, name=None, ctx=None):
 147   r"""This is the slowpath function for Eager mode.
 148   This is for function accumulate_nv2
 149   """
 150   _ctx = ctx if ctx else _context.context()
 151   if not isinstance(inputs, (list, tuple)):
 152     raise TypeError(
 153         "Expected list for 'inputs' argument to "
 154         "'accumulate_nv2' Op, not %r." % inputs)
 155   _attr_N = len(inputs)
 156   shape = _execute.make_shape(shape, "shape")
 157   _attr_T, inputs = _execute.args_to_matching_eager(list(inputs), _ctx)
 158   _inputs_flat = list(inputs)
 159   _attrs = ("N", _attr_N, "T", _attr_T, "shape", shape)
 160   _result = _execute.execute(b"AccumulateNV2", 1, inputs=_inputs_flat,
 161                              attrs=_attrs, ctx=_ctx, name=name)
 162   _execute.record_gradient(
 163       "AccumulateNV2", _inputs_flat, _attrs, _result, name)
 164   _result, = _result
 165   return _result
 166 
 167 
@tf_export('math.acos', 'acos')
@deprecated_endpoints('acos')
def acos(x, name=None):
  r"""Computes acos of x element-wise.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build an "Acos" node through the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Acos", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Acos", _inputs_flat, _attrs, _result, name)
    # Single-output op; unwrap it from the list.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: attempt the C fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Acos", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the generic slow path.
      return acos_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into the matching Python exception,
      # appending the op name to aid debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 207 
 208 
 209 def acos_eager_fallback(x, name=None, ctx=None):
 210   r"""This is the slowpath function for Eager mode.
 211   This is for function acos
 212   """
 213   _ctx = ctx if ctx else _context.context()
 214   _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
 215   _inputs_flat = [x]
 216   _attrs = ("T", _attr_T)
 217   _result = _execute.execute(b"Acos", 1, inputs=_inputs_flat, attrs=_attrs,
 218                              ctx=_ctx, name=name)
 219   _execute.record_gradient(
 220       "Acos", _inputs_flat, _attrs, _result, name)
 221   _result, = _result
 222   return _result
 223 
 224 
@tf_export('math.acosh', 'acosh')
@deprecated_endpoints('acosh')
def acosh(x, name=None):
  r"""Computes inverse hyperbolic cosine of x element-wise.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build an "Acosh" node through the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Acosh", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Acosh", _inputs_flat, _attrs, _result, name)
    # Single-output op; unwrap it from the list.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: attempt the C fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Acosh", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the generic slow path.
      return acosh_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into the matching Python exception,
      # appending the op name to aid debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 264 
 265 
 266 def acosh_eager_fallback(x, name=None, ctx=None):
 267   r"""This is the slowpath function for Eager mode.
 268   This is for function acosh
 269   """
 270   _ctx = ctx if ctx else _context.context()
 271   _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
 272   _inputs_flat = [x]
 273   _attrs = ("T", _attr_T)
 274   _result = _execute.execute(b"Acosh", 1, inputs=_inputs_flat, attrs=_attrs,
 275                              ctx=_ctx, name=name)
 276   _execute.record_gradient(
 277       "Acosh", _inputs_flat, _attrs, _result, name)
 278   _result, = _result
 279   return _result
 280 
 281 
@tf_export('math.add', 'add')
@deprecated_endpoints('add')
def add(x, y, name=None):
  r"""Returns x + y element-wise.

  *NOTE*: `math.add` supports broadcasting. `AddN` does not. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`, `string`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build an "Add" node through the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Add", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Add", _inputs_flat, _attrs, _result, name)
    # Single-output op; unwrap it from the list.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: attempt the C fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Add", name,
        _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the generic slow path.
      return add_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into the matching Python exception,
      # appending the op name to aid debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 325 
 326 
 327 def add_eager_fallback(x, y, name=None, ctx=None):
 328   r"""This is the slowpath function for Eager mode.
 329   This is for function add
 330   """
 331   _ctx = ctx if ctx else _context.context()
 332   _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
 333   (x, y) = _inputs_T
 334   _inputs_flat = [x, y]
 335   _attrs = ("T", _attr_T)
 336   _result = _execute.execute(b"Add", 1, inputs=_inputs_flat, attrs=_attrs,
 337                              ctx=_ctx, name=name)
 338   _execute.record_gradient(
 339       "Add", _inputs_flat, _attrs, _result, name)
 340   _result, = _result
 341   return _result
 342 
 343 
def add_n(inputs, name=None):
  r"""Add all input tensors element wise.

  Args:
    inputs: A list of at least 1 `Tensor` objects with the same type in: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`, `variant`.
      Must all be the same size and shape.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `inputs`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: the "N" attr is derived from the list length, so the
    # argument must really be a list/tuple.
    if not isinstance(inputs, (list, tuple)):
      raise TypeError(
          "Expected list for 'inputs' argument to "
          "'add_n' Op, not %r." % inputs)
    _attr_N = len(inputs)
    _, _, _op = _op_def_lib._apply_op_helper(
        "AddN", inputs=inputs, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("N", _op.get_attr("N"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "AddN", _inputs_flat, _attrs, _result, name)
    # Single-output op; unwrap it from the list.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: attempt the C fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "AddN", name,
        _ctx._post_execution_callbacks, inputs)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the generic slow path.
      return add_n_eager_fallback(
          inputs, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into the matching Python exception,
      # appending the op name to aid debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 387 
 388 
 389 def add_n_eager_fallback(inputs, name=None, ctx=None):
 390   r"""This is the slowpath function for Eager mode.
 391   This is for function add_n
 392   """
 393   _ctx = ctx if ctx else _context.context()
 394   if not isinstance(inputs, (list, tuple)):
 395     raise TypeError(
 396         "Expected list for 'inputs' argument to "
 397         "'add_n' Op, not %r." % inputs)
 398   _attr_N = len(inputs)
 399   _attr_T, inputs = _execute.args_to_matching_eager(list(inputs), _ctx)
 400   _inputs_flat = list(inputs)
 401   _attrs = ("N", _attr_N, "T", _attr_T)
 402   _result = _execute.execute(b"AddN", 1, inputs=_inputs_flat, attrs=_attrs,
 403                              ctx=_ctx, name=name)
 404   _execute.record_gradient(
 405       "AddN", _inputs_flat, _attrs, _result, name)
 406   _result, = _result
 407   return _result
 408 
 409 
def add_v2(x, y, name=None):
  r"""Returns x + y element-wise.

  *NOTE*: `Add` supports broadcasting. `AddN` does not. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build an "AddV2" node through the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "AddV2", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "AddV2", _inputs_flat, _attrs, _result, name)
    # Single-output op; unwrap it from the list.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: attempt the C fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "AddV2", name,
        _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the generic slow path.
      return add_v2_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into the matching Python exception,
      # appending the op name to aid debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 451 
 452 
 453 def add_v2_eager_fallback(x, y, name=None, ctx=None):
 454   r"""This is the slowpath function for Eager mode.
 455   This is for function add_v2
 456   """
 457   _ctx = ctx if ctx else _context.context()
 458   _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
 459   (x, y) = _inputs_T
 460   _inputs_flat = [x, y]
 461   _attrs = ("T", _attr_T)
 462   _result = _execute.execute(b"AddV2", 1, inputs=_inputs_flat, attrs=_attrs,
 463                              ctx=_ctx, name=name)
 464   _execute.record_gradient(
 465       "AddV2", _inputs_flat, _attrs, _result, name)
 466   _result, = _result
 467   return _result
 468 
 469 
def _all(input, axis, keep_dims=False, name=None):
  r"""Computes the "logical and" of elements across dimensions of a tensor.

  Reduces `input` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `axis`. If `keep_dims` is true, the reduced dimensions are
  retained with length 1.

  Args:
    input: A `Tensor` of type `bool`. The tensor to reduce.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The dimensions to reduce. Must be in the range
      `[-rank(input), rank(input))`.
    keep_dims: An optional `bool`. Defaults to `False`.
      If true, retain reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize keep_dims, then build an "All" node. Note the
    # public `axis` argument maps to the op's `reduction_indices` input.
    if keep_dims is None:
      keep_dims = False
    keep_dims = _execute.make_bool(keep_dims, "keep_dims")
    _, _, _op = _op_def_lib._apply_op_helper(
        "All", input=input, reduction_indices=axis, keep_dims=keep_dims,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("keep_dims", _op.get_attr("keep_dims"), "Tidx",
              _op.get_attr("Tidx"))
    _execute.record_gradient(
      "All", _inputs_flat, _attrs, _result, name)
    # Single-output op; unwrap it from the list.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: attempt the C fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "All", name,
        _ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the generic slow path.
      return _all_eager_fallback(
          input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into the matching Python exception,
      # appending the op name to aid debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 522 
 523 
 524 def _all_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
 525   r"""This is the slowpath function for Eager mode.
 526   This is for function _all
 527   """
 528   _ctx = ctx if ctx else _context.context()
 529   if keep_dims is None:
 530     keep_dims = False
 531   keep_dims = _execute.make_bool(keep_dims, "keep_dims")
 532   _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)
 533   input = _ops.convert_to_tensor(input, _dtypes.bool)
 534   _inputs_flat = [input, axis]
 535   _attrs = ("keep_dims", keep_dims, "Tidx", _attr_Tidx)
 536   _result = _execute.execute(b"All", 1, inputs=_inputs_flat, attrs=_attrs,
 537                              ctx=_ctx, name=name)
 538   _execute.record_gradient(
 539       "All", _inputs_flat, _attrs, _result, name)
 540   _result, = _result
 541   return _result
 542 
 543 
def angle(input, Tout=_dtypes.float32, name=None):
  r"""Returns the argument of a complex number.

  Given a tensor `input` of complex numbers, this operation returns a tensor of
  type `float` that is the argument of each element in `input`. All elements in
  `input` must be complex numbers of the form \\(a + bj\\), where *a*
  is the real part and *b* is the imaginary part.

  The argument returned by this operation is of the form \\(atan2(b, a)\\).

  For example:

  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.angle(input) ==> [2.0132, 1.056]
  ```

  @compatibility(numpy)
  Equivalent to np.angle.
  @end_compatibility

  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
    Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `Tout`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize the output-dtype attr, then build the node.
    if Tout is None:
      Tout = _dtypes.float32
    Tout = _execute.make_type(Tout, "Tout")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Angle", input=input, Tout=Tout, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout"))
    _execute.record_gradient(
      "Angle", _inputs_flat, _attrs, _result, name)
    # Single-output op; unwrap it from the list.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: attempt the C fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Angle", name,
        _ctx._post_execution_callbacks, input, "Tout", Tout)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the generic slow path.
      return angle_eager_fallback(
          input, Tout=Tout, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into the matching Python exception,
      # appending the op name to aid debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 603 
 604 
 605 def angle_eager_fallback(input, Tout=_dtypes.float32, name=None, ctx=None):
 606   r"""This is the slowpath function for Eager mode.
 607   This is for function angle
 608   """
 609   _ctx = ctx if ctx else _context.context()
 610   if Tout is None:
 611     Tout = _dtypes.float32
 612   Tout = _execute.make_type(Tout, "Tout")
 613   _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx, _dtypes.complex64)
 614   _inputs_flat = [input]
 615   _attrs = ("T", _attr_T, "Tout", Tout)
 616   _result = _execute.execute(b"Angle", 1, inputs=_inputs_flat, attrs=_attrs,
 617                              ctx=_ctx, name=name)
 618   _execute.record_gradient(
 619       "Angle", _inputs_flat, _attrs, _result, name)
 620   _result, = _result
 621   return _result
 622 
 623 
def _any(input, axis, keep_dims=False, name=None):
  r"""Computes the "logical or" of elements across dimensions of a tensor.

  Reduces `input` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `axis`. If `keep_dims` is true, the reduced dimensions are
  retained with length 1.

  Args:
    input: A `Tensor` of type `bool`. The tensor to reduce.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The dimensions to reduce. Must be in the range
      `[-rank(input), rank(input))`.
    keep_dims: An optional `bool`. Defaults to `False`.
      If true, retain reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize keep_dims, then build an "Any" node. Note the
    # public `axis` argument maps to the op's `reduction_indices` input.
    if keep_dims is None:
      keep_dims = False
    keep_dims = _execute.make_bool(keep_dims, "keep_dims")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Any", input=input, reduction_indices=axis, keep_dims=keep_dims,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("keep_dims", _op.get_attr("keep_dims"), "Tidx",
              _op.get_attr("Tidx"))
    _execute.record_gradient(
      "Any", _inputs_flat, _attrs, _result, name)
    # Single-output op; unwrap it from the list.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: attempt the C fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Any", name,
        _ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the generic slow path.
      return _any_eager_fallback(
          input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into the matching Python exception,
      # appending the op name to aid debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 676 
 677 
 678 def _any_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
 679   r"""This is the slowpath function for Eager mode.
 680   This is for function _any
 681   """
 682   _ctx = ctx if ctx else _context.context()
 683   if keep_dims is None:
 684     keep_dims = False
 685   keep_dims = _execute.make_bool(keep_dims, "keep_dims")
 686   _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx, _dtypes.int32)
 687   input = _ops.convert_to_tensor(input, _dtypes.bool)
 688   _inputs_flat = [input, axis]
 689   _attrs = ("keep_dims", keep_dims, "Tidx", _attr_Tidx)
 690   _result = _execute.execute(b"Any", 1, inputs=_inputs_flat, attrs=_attrs,
 691                              ctx=_ctx, name=name)
 692   _execute.record_gradient(
 693       "Any", _inputs_flat, _attrs, _result, name)
 694   _result, = _result
 695   return _result
 696 
 697 
def approximate_equal(x, y, tolerance=1e-05, name=None):
  r"""Returns the truth value of abs(x-y) < tolerance element-wise.

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
    y: A `Tensor`. Must have the same type as `x`.
    tolerance: An optional `float`. Defaults to `1e-05`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize the tolerance attr, then build the node.
    if tolerance is None:
      tolerance = 1e-05
    tolerance = _execute.make_float(tolerance, "tolerance")
    _, _, _op = _op_def_lib._apply_op_helper(
        "ApproximateEqual", x=x, y=y, tolerance=tolerance, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "tolerance", _op.get_attr("tolerance"))
    _execute.record_gradient(
      "ApproximateEqual", _inputs_flat, _attrs, _result, name)
    # Single-output op; unwrap it from the list.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: attempt the C fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ApproximateEqual", name, _ctx._post_execution_callbacks, x, y,
        "tolerance", tolerance)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the generic slow path.
      return approximate_equal_eager_fallback(
          x, y, tolerance=tolerance, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into the matching Python exception,
      # appending the op name to aid debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 741 
 742 
 743 def approximate_equal_eager_fallback(x, y, tolerance=1e-05, name=None, ctx=None):
 744   r"""This is the slowpath function for Eager mode.
 745   This is for function approximate_equal
 746   """
 747   _ctx = ctx if ctx else _context.context()
 748   if tolerance is None:
 749     tolerance = 1e-05
 750   tolerance = _execute.make_float(tolerance, "tolerance")
 751   _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
 752   (x, y) = _inputs_T
 753   _inputs_flat = [x, y]
 754   _attrs = ("T", _attr_T, "tolerance", tolerance)
 755   _result = _execute.execute(b"ApproximateEqual", 1, inputs=_inputs_flat,
 756                              attrs=_attrs, ctx=_ctx, name=name)
 757   _execute.record_gradient(
 758       "ApproximateEqual", _inputs_flat, _attrs, _result, name)
 759   _result, = _result
 760   return _result
 761 
 762 
def arg_max(input, dimension, output_type=_dtypes.int64, name=None):
  r"""Returns the index with the largest value across dimensions of a tensor.

  Note that in case of ties the identity of the return value is not guaranteed.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
    dimension: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      int32 or int64, must be in the range `[-rank(input), rank(input))`.
      Describes which dimension of the input Tensor to reduce across. For vectors,
      use dimension = 0.
    output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `output_type`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize the output-dtype attr, then build the node.
    if output_type is None:
      output_type = _dtypes.int64
    output_type = _execute.make_type(output_type, "output_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "ArgMax", input=input, dimension=dimension, output_type=output_type,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"),
              "output_type", _op.get_attr("output_type"))
    _execute.record_gradient(
      "ArgMax", _inputs_flat, _attrs, _result, name)
    # Single-output op; unwrap it from the list.
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: attempt the C fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ArgMax", name,
        _ctx._post_execution_callbacks, input, dimension, "output_type",
        output_type)
      return _result
    except _core._FallbackException:
      # Fast path could not handle the inputs; use the generic slow path.
      return arg_max_eager_fallback(
          input, dimension, output_type=output_type, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C++ status into the matching Python exception,
      # appending the op name to aid debugging.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 813 
 814 
 815 def arg_max_eager_fallback(input, dimension, output_type=_dtypes.int64, name=None, ctx=None):
 816   r"""This is the slowpath function for Eager mode.
 817   This is for function arg_max
 818   """
 819   _ctx = ctx if ctx else _context.context()
 820   if output_type is None:
 821     output_type = _dtypes.int64
 822   output_type = _execute.make_type(output_type, "output_type")
 823   _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
 824   _attr_Tidx, (dimension,) = _execute.args_to_matching_eager([dimension], _ctx, _dtypes.int32)
 825   _inputs_flat = [input, dimension]
 826   _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "output_type", output_type)
 827   _result = _execute.execute(b"ArgMax", 1, inputs=_inputs_flat, attrs=_attrs,
 828                              ctx=_ctx, name=name)
 829   _execute.record_gradient(
 830       "ArgMax", _inputs_flat, _attrs, _result, name)
 831   _result, = _result
 832   return _result
 833 
 834 
def arg_min(input, dimension, output_type=_dtypes.int64, name=None):
  r"""Returns the index with the smallest value across dimensions of a tensor.

  Note that in case of ties the identity of the return value is not guaranteed.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
    dimension: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      int32 or int64, must be in the range `[-rank(input), rank(input))`.
      Describes which dimension of the input Tensor to reduce across. For vectors,
      use dimension = 0.
    output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `output_type`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize attrs, then build an ArgMin node in the graph.
    if output_type is None:
      output_type = _dtypes.int64
    output_type = _execute.make_type(output_type, "output_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "ArgMin", input=input, dimension=dimension, output_type=output_type,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"),
              "output_type", _op.get_attr("output_type"))
    _execute.record_gradient(
      "ArgMin", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: execute directly through the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ArgMin", name,
        _ctx._post_execution_callbacks, input, dimension, "output_type",
        output_type)
      return _result
    except _core._FallbackException:
      # The fast path rejected these inputs; retry via the Python slow path.
      return arg_min_eager_fallback(
          input, dimension, output_type=output_type, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the corresponding Python exception,
      # appending the op name (if any) to the message.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 885 
 886 
 887 def arg_min_eager_fallback(input, dimension, output_type=_dtypes.int64, name=None, ctx=None):
 888   r"""This is the slowpath function for Eager mode.
 889   This is for function arg_min
 890   """
 891   _ctx = ctx if ctx else _context.context()
 892   if output_type is None:
 893     output_type = _dtypes.int64
 894   output_type = _execute.make_type(output_type, "output_type")
 895   _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
 896   _attr_Tidx, (dimension,) = _execute.args_to_matching_eager([dimension], _ctx, _dtypes.int32)
 897   _inputs_flat = [input, dimension]
 898   _attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "output_type", output_type)
 899   _result = _execute.execute(b"ArgMin", 1, inputs=_inputs_flat, attrs=_attrs,
 900                              ctx=_ctx, name=name)
 901   _execute.record_gradient(
 902       "ArgMin", _inputs_flat, _attrs, _result, name)
 903   _result, = _result
 904   return _result
 905 
 906 
@tf_export('math.asin', 'asin')
@deprecated_endpoints('asin')
def asin(x, name=None):
  r"""Computes asin of x element-wise.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build an Asin node in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Asin", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Asin", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: execute directly through the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Asin", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # The fast path rejected these inputs; retry via the Python slow path.
      return asin_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the corresponding Python exception,
      # appending the op name (if any) to the message.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
 946 
 947 
 948 def asin_eager_fallback(x, name=None, ctx=None):
 949   r"""This is the slowpath function for Eager mode.
 950   This is for function asin
 951   """
 952   _ctx = ctx if ctx else _context.context()
 953   _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
 954   _inputs_flat = [x]
 955   _attrs = ("T", _attr_T)
 956   _result = _execute.execute(b"Asin", 1, inputs=_inputs_flat, attrs=_attrs,
 957                              ctx=_ctx, name=name)
 958   _execute.record_gradient(
 959       "Asin", _inputs_flat, _attrs, _result, name)
 960   _result, = _result
 961   return _result
 962 
 963 
@tf_export('math.asinh', 'asinh')
@deprecated_endpoints('asinh')
def asinh(x, name=None):
  r"""Computes inverse hyperbolic sine of x element-wise.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build an Asinh node in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Asinh", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Asinh", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: execute directly through the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Asinh", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # The fast path rejected these inputs; retry via the Python slow path.
      return asinh_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the corresponding Python exception,
      # appending the op name (if any) to the message.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1003 
1004 
def asinh_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for asinh.

  Runs the Asinh op through the generic eager execute path when the fast
  C++ path is unavailable.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Asinh", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Asinh", flat_inputs, op_attrs, outputs, name)
  # Single-output op: unwrap the one-element result list.
  [result] = outputs
  return result
1019 
1020 
@tf_export('math.atan', 'atan')
@deprecated_endpoints('atan')
def atan(x, name=None):
  r"""Computes atan of x element-wise.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build an Atan node in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Atan", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Atan", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: execute directly through the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Atan", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # The fast path rejected these inputs; retry via the Python slow path.
      return atan_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the corresponding Python exception,
      # appending the op name (if any) to the message.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1060 
1061 
def atan_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for atan.

  Runs the Atan op through the generic eager execute path when the fast
  C++ path is unavailable.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Atan", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Atan", flat_inputs, op_attrs, outputs, name)
  # Single-output op: unwrap the one-element result list.
  [result] = outputs
  return result
1076 
1077 
@tf_export('math.atan2', 'atan2')
@deprecated_endpoints('atan2')
def atan2(y, x, name=None):
  r"""Computes arctangent of `y/x` element-wise, respecting signs of the arguments.

  This is the angle \( \theta \in [-\pi, \pi] \) such that
  \[ x = r \cos(\theta) \]
  and
  \[ y = r \sin(\theta) \]
  where \(r = \sqrt(x^2 + y^2) \).

  Args:
    y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    x: A `Tensor`. Must have the same type as `y`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `y`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build an Atan2 node in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Atan2", y=y, x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Atan2", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: execute directly through the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Atan2", name,
        _ctx._post_execution_callbacks, y, x)
      return _result
    except _core._FallbackException:
      # The fast path rejected these inputs; retry via the Python slow path.
      return atan2_eager_fallback(
          y, x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the corresponding Python exception,
      # appending the op name (if any) to the message.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1124 
1125 
def atan2_eager_fallback(y, x, name=None, ctx=None):
  r"""Eager-mode slow path for atan2.

  Runs the Atan2 op through the generic eager execute path when the fast
  C++ path is unavailable.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (y, x) = _execute.args_to_matching_eager([y, x], eager_ctx)
  flat_inputs = [y, x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Atan2", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Atan2", flat_inputs, op_attrs, outputs, name)
  # Single-output op: unwrap the one-element result list.
  [result] = outputs
  return result
1141 
1142 
@tf_export('math.atanh', 'atanh')
@deprecated_endpoints('atanh')
def atanh(x, name=None):
  r"""Computes inverse hyperbolic tangent of x element-wise.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build an Atanh node in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Atanh", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Atanh", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: execute directly through the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Atanh", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # The fast path rejected these inputs; retry via the Python slow path.
      return atanh_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the corresponding Python exception,
      # appending the op name (if any) to the message.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1182 
1183 
def atanh_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for atanh.

  Runs the Atanh op through the generic eager execute path when the fast
  C++ path is unavailable.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Atanh", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Atanh", flat_inputs, op_attrs, outputs, name)
  # Single-output op: unwrap the one-element result list.
  [result] = outputs
  return result
1198 
1199 
def batch_mat_mul(x, y, adj_x=False, adj_y=False, name=None):
  r"""Multiplies slices of two tensors in batches.

  Multiplies all slices of `Tensor` `x` and `y` (each slice can be
  viewed as an element of a batch), and arranges the individual results
  in a single output tensor of the same batch size. Each of the
  individual slices can optionally be adjointed (to adjoint a matrix
  means to transpose and conjugate it) before multiplication by setting
  the `adj_x` or `adj_y` flag to `True`, which are by default `False`.

  The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]`
  and `[..., r_y, c_y]`.

  The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where:

      r_o = c_x if adj_x else r_x
      c_o = r_y if adj_y else c_y

  It is computed as:

      output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :])

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
      2-D or higher with shape `[..., r_x, c_x]`.
    y: A `Tensor`. Must have the same type as `x`.
      2-D or higher with shape `[..., r_y, c_y]`.
    adj_x: An optional `bool`. Defaults to `False`.
      If `True`, adjoint the slices of `x`. Defaults to `False`.
    adj_y: An optional `bool`. Defaults to `False`.
      If `True`, adjoint the slices of `y`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize attrs, then build a BatchMatMul node in the graph.
    if adj_x is None:
      adj_x = False
    adj_x = _execute.make_bool(adj_x, "adj_x")
    if adj_y is None:
      adj_y = False
    adj_y = _execute.make_bool(adj_y, "adj_y")
    _, _, _op = _op_def_lib._apply_op_helper(
        "BatchMatMul", x=x, y=y, adj_x=adj_x, adj_y=adj_y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "adj_x", _op.get_attr("adj_x"), "adj_y",
              _op.get_attr("adj_y"))
    _execute.record_gradient(
      "BatchMatMul", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: execute directly through the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BatchMatMul",
        name, _ctx._post_execution_callbacks, x, y, "adj_x", adj_x, "adj_y",
        adj_y)
      return _result
    except _core._FallbackException:
      # The fast path rejected these inputs; retry via the Python slow path.
      return batch_mat_mul_eager_fallback(
          x, y, adj_x=adj_x, adj_y=adj_y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the corresponding Python exception,
      # appending the op name (if any) to the message.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1271 
1272 
def batch_mat_mul_eager_fallback(x, y, adj_x=False, adj_y=False, name=None, ctx=None):
  r"""Eager-mode slow path for batch_mat_mul.

  Runs the BatchMatMul op through the generic eager execute path when the
  fast C++ path is unavailable.
  """
  eager_ctx = ctx or _context.context()
  # Normalize the adjoint flags to concrete bools.
  adj_x = _execute.make_bool(False if adj_x is None else adj_x, "adj_x")
  adj_y = _execute.make_bool(False if adj_y is None else adj_y, "adj_y")
  attr_t, (x, y) = _execute.args_to_matching_eager([x, y], eager_ctx)
  flat_inputs = [x, y]
  op_attrs = ("T", attr_t, "adj_x", adj_x, "adj_y", adj_y)
  outputs = _execute.execute(b"BatchMatMul", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("BatchMatMul", flat_inputs, op_attrs, outputs,
                           name)
  # Single-output op: unwrap the one-element result list.
  [result] = outputs
  return result
1294 
1295 
def bessel_i0e(x, name=None):
  r"""Computes the Bessel i0e function of `x` element-wise.

  Exponentially scaled modified Bessel function of order 0 defined as
  `bessel_i0e(x) = exp(-abs(x)) bessel_i0(x)`.

  This function is faster and numerically stabler than `bessel_i0(x)`.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a BesselI0e node in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "BesselI0e", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "BesselI0e", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: execute directly through the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BesselI0e",
        name, _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # The fast path rejected these inputs; retry via the Python slow path.
      return bessel_i0e_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the corresponding Python exception,
      # appending the op name (if any) to the message.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1338 
1339 
def bessel_i0e_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for bessel_i0e.

  Runs the BesselI0e op through the generic eager execute path when the
  fast C++ path is unavailable.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"BesselI0e", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("BesselI0e", flat_inputs, op_attrs, outputs, name)
  # Single-output op: unwrap the one-element result list.
  [result] = outputs
  return result
1354 
1355 
def bessel_i1e(x, name=None):
  r"""Computes the Bessel i1e function of `x` element-wise.

  Exponentially scaled modified Bessel function of order 1 defined as
  `bessel_i1e(x) = exp(-abs(x)) bessel_i1(x)`.

  This function is faster and numerically stabler than `bessel_i1(x)`.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a BesselI1e node in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "BesselI1e", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "BesselI1e", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: execute directly through the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "BesselI1e",
        name, _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # The fast path rejected these inputs; retry via the Python slow path.
      return bessel_i1e_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the corresponding Python exception,
      # appending the op name (if any) to the message.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1398 
1399 
def bessel_i1e_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for bessel_i1e.

  Runs the BesselI1e op through the generic eager execute path when the
  fast C++ path is unavailable.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"BesselI1e", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("BesselI1e", flat_inputs, op_attrs, outputs, name)
  # Single-output op: unwrap the one-element result list.
  [result] = outputs
  return result
1414 
1415 
@tf_export('math.betainc', 'betainc')
@deprecated_endpoints('betainc')
def betainc(a, b, x, name=None):
  r"""Compute the regularized incomplete beta integral \\(I_x(a, b)\\).

  The regularized incomplete beta integral is defined as:


  \\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\)

  where


  \\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\)


  is the incomplete beta function and \\(B(a, b)\\) is the *complete*
  beta function.

  Args:
    a: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    b: A `Tensor`. Must have the same type as `a`.
    x: A `Tensor`. Must have the same type as `a`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `a`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a Betainc node in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Betainc", a=a, b=b, x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Betainc", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: execute directly through the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Betainc",
        name, _ctx._post_execution_callbacks, a, b, x)
      return _result
    except _core._FallbackException:
      # The fast path rejected these inputs; retry via the Python slow path.
      return betainc_eager_fallback(
          a, b, x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the corresponding Python exception,
      # appending the op name (if any) to the message.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1471 
1472 
def betainc_eager_fallback(a, b, x, name=None, ctx=None):
  r"""Eager-mode slow path for betainc.

  Runs the Betainc op through the generic eager execute path when the fast
  C++ path is unavailable.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (a, b, x) = _execute.args_to_matching_eager([a, b, x], eager_ctx)
  flat_inputs = [a, b, x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Betainc", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Betainc", flat_inputs, op_attrs, outputs, name)
  # Single-output op: unwrap the one-element result list.
  [result] = outputs
  return result
1488 
1489 
def bincount(arr, size, weights, name=None):
  r"""Counts the number of occurrences of each value in an integer array.

  Outputs a vector with length `size` and the same dtype as `weights`. If
  `weights` are empty, then index `i` stores the number of times the value `i` is
  counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of
  the value in `weights` at each index where the corresponding value in `arr` is
  `i`.

  Values in `arr` outside of the range [0, size) are ignored.

  Args:
    arr: A `Tensor` of type `int32`. int32 `Tensor`.
    size: A `Tensor` of type `int32`. non-negative int32 scalar `Tensor`.
    weights: A `Tensor`. Must be one of the following types: `int32`, `int64`, `float32`, `float64`.
      is an int32, int64, float32, or float64 `Tensor` with the same
      shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights
      equal to 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `weights`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a Bincount node in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Bincount", arr=arr, size=size, weights=weights, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Bincount", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: execute directly through the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Bincount",
        name, _ctx._post_execution_callbacks, arr, size, weights)
      return _result
    except _core._FallbackException:
      # The fast path rejected these inputs; retry via the Python slow path.
      return bincount_eager_fallback(
          arr, size, weights, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as the corresponding Python exception,
      # appending the op name (if any) to the message.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1540 
1541 
def bincount_eager_fallback(arr, size, weights, name=None, ctx=None):
  r"""Eager-mode slow path for bincount.

  Runs the Bincount op through the generic eager execute path when the fast
  C++ path is unavailable.
  """
  eager_ctx = ctx or _context.context()
  # `weights` determines the "T" attr; `arr` and `size` are fixed int32.
  attr_t, (weights,) = _execute.args_to_matching_eager([weights], eager_ctx)
  arr = _ops.convert_to_tensor(arr, _dtypes.int32)
  size = _ops.convert_to_tensor(size, _dtypes.int32)
  flat_inputs = [arr, size, weights]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Bincount", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Bincount", flat_inputs, op_attrs, outputs, name)
  # Single-output op: unwrap the one-element result list.
  [result] = outputs
  return result
1558 
1559 
def bucketize(input, boundaries, name=None):
  r"""Bucketizes 'input' based on 'boundaries'.

  For example, if the inputs are
      boundaries = [0, 10, 100]
      input = [[-5, 10000]
               [150,   10]
               [5,    100]]

  then the output will be
      output = [[0, 3]
                [3, 2]
                [1, 3]]

  Args:
    input: A `Tensor`. Must be one of the following types: `int32`, `int64`, `float32`, `float64`.
      Any shape of Tensor contains with int or float type.
    boundaries: A list of `floats`.
      A sorted list of floats gives the boundary of the buckets.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: validate attrs in Python, then build a "Bucketize" node.
    if not isinstance(boundaries, (list, tuple)):
      raise TypeError(
          "Expected list for 'boundaries' argument to "
          "'bucketize' Op, not %r." % boundaries)
    # Coerce every boundary to a float attr value (raises on bad elements).
    boundaries = [_execute.make_float(_f, "boundaries") for _f in boundaries]
    _, _, _op = _op_def_lib._apply_op_helper(
        "Bucketize", input=input, boundaries=boundaries, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "boundaries",
              _op.get_attr("boundaries"))
    # Register with the gradient tape machinery.
    _execute.record_gradient(
      "Bucketize", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: positional layout (handle, device, op name, name,
      # callbacks, inputs..., attr-name/attr-value pairs) is the fixed
      # TFE_Py_FastPathExecute protocol — do not reorder.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Bucketize",
        name, _ctx._post_execution_callbacks, input, "boundaries", boundaries)
      return _result
    except _core._FallbackException:
      # Inputs need Python-side conversion; retry via the slow path.
      return bucketize_eager_fallback(
          input, boundaries=boundaries, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1617 
1618 
def bucketize_eager_fallback(input, boundaries, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function bucketize

  Validates the `boundaries` attr, infers the "T" attr from `input`, and
  executes "Bucketize" through the generic eager executor.
  """
  _ctx = ctx if ctx else _context.context()
  # Same attr validation as the graph path in `bucketize`.
  if not isinstance(boundaries, (list, tuple)):
    raise TypeError(
        "Expected list for 'boundaries' argument to "
        "'bucketize' Op, not %r." % boundaries)
  boundaries = [_execute.make_float(_f, "boundaries") for _f in boundaries]
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "boundaries", boundaries)
  # 1 == number of outputs declared by the op.
  _result = _execute.execute(b"Bucketize", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Bucketize", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
1638 
1639 
def cast(x, DstT, Truncate=False, name=None):
  r"""Cast x of type SrcT to y of DstT.

  Args:
    x: A `Tensor`.
    DstT: A `tf.DType`.
    Truncate: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `DstT`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize attrs, then build a "Cast" node in the graph.
    DstT = _execute.make_type(DstT, "DstT")
    if Truncate is None:
      Truncate = False
    Truncate = _execute.make_bool(Truncate, "Truncate")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Cast", x=x, DstT=DstT, Truncate=Truncate, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # SrcT is inferred from `x` by the op-def library; read it back off the op.
    _attrs = ("SrcT", _op.get_attr("SrcT"), "DstT", _op.get_attr("DstT"),
              "Truncate", _op.get_attr("Truncate"))
    _execute.record_gradient(
      "Cast", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: fixed positional protocol of TFE_Py_FastPathExecute.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Cast", name,
        _ctx._post_execution_callbacks, x, "DstT", DstT, "Truncate", Truncate)
      return _result
    except _core._FallbackException:
      # Inputs need Python-side conversion; retry via the slow path.
      return cast_eager_fallback(
          x, DstT=DstT, Truncate=Truncate, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1684 
1685 
def cast_eager_fallback(x, DstT, Truncate=False, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function cast

  Normalizes the DstT/Truncate attrs, infers SrcT from `x`, and executes
  "Cast" through the generic eager executor.
  """
  _ctx = ctx if ctx else _context.context()
  DstT = _execute.make_type(DstT, "DstT")
  if Truncate is None:
    Truncate = False
  Truncate = _execute.make_bool(Truncate, "Truncate")
  # SrcT is inferred from the runtime dtype of `x`.
  _attr_SrcT, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("SrcT", _attr_SrcT, "DstT", DstT, "Truncate", Truncate)
  # 1 == number of outputs declared by the op.
  _result = _execute.execute(b"Cast", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Cast", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
1704 
1705 
@tf_export('math.ceil', 'ceil')
@deprecated_endpoints('ceil')
def ceil(x, name=None):
  r"""Returns element-wise smallest integer not less than x.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a "Ceil" node via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Ceil", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Ceil", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: fixed positional protocol of TFE_Py_FastPathExecute.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Ceil", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Input needs Python-side conversion; retry via the slow path.
      return ceil_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1745 
1746 
def ceil_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function ceil

  Infers the "T" attr from `x` and executes "Ceil" through the generic
  eager executor.
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  # 1 == number of outputs declared by the op.
  _result = _execute.execute(b"Ceil", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Ceil", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
1761 
1762 
def _clip_by_value(t, clip_value_min, clip_value_max, name=None):
  r"""Clips tensor values to a specified min and max.

  Given a tensor `t`, this operation returns a tensor of the same type and
  shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.
  Any values less than `clip_value_min` are set to `clip_value_min`. Any values
  greater than `clip_value_max` are set to `clip_value_max`.

  Args:
    t: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      A `Tensor`.
    clip_value_min: A `Tensor`. Must have the same type as `t`.
      A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
      as `t`. The minimum value to clip by.
    clip_value_max: A `Tensor`. Must have the same type as `t`.
      A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
      as `t`. The maximum value to clip by.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `t`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a "ClipByValue" node via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "ClipByValue", t=t, clip_value_min=clip_value_min,
        clip_value_max=clip_value_max, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "ClipByValue", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: fixed positional protocol of TFE_Py_FastPathExecute.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ClipByValue",
        name, _ctx._post_execution_callbacks, t, clip_value_min,
        clip_value_max)
      return _result
    except _core._FallbackException:
      # Inputs need Python-side conversion; retry via the slow path.
      return _clip_by_value_eager_fallback(
          t, clip_value_min, clip_value_max, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1814 
1815 
def _clip_by_value_eager_fallback(t, clip_value_min, clip_value_max, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function _clip_by_value

  Converts all three inputs to a single matching dtype (the "T" attr) and
  executes "ClipByValue" through the generic eager executor.
  """
  _ctx = ctx if ctx else _context.context()
  # All three inputs must share one dtype per the op definition.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([t, clip_value_min, clip_value_max], _ctx)
  (t, clip_value_min, clip_value_max) = _inputs_T
  _inputs_flat = [t, clip_value_min, clip_value_max]
  _attrs = ("T", _attr_T)
  # 1 == number of outputs declared by the op.
  _result = _execute.execute(b"ClipByValue", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ClipByValue", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
1831 
1832 
def compare_and_bitpack(input, threshold, name=None):
  r"""Compare values of `input` to `threshold` and pack resulting bits into a `uint8`.

  Each comparison returns a boolean `true` (if `input_value > threshold`)
  or and `false` otherwise.

  This operation is useful for Locality-Sensitive-Hashing (LSH) and other
  algorithms that use hashing approximations of cosine and `L2` distances;
  codes can be generated from an input via:

  ```python
  codebook_size = 50
  codebook_bits = codebook_size * 32
  codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits],
                             dtype=x.dtype,
                             initializer=tf.orthogonal_initializer())
  codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.)
  codes = tf.bitcast(codes, tf.int32)  # go from uint8 to int32
  # now codes has shape x.shape[:-1] + [codebook_size]
  ```

  **NOTE**: Currently, the innermost dimension of the tensor must be divisible
  by 8.

  Given an `input` shaped `[s0, s1, ..., s_n]`, the output is
  a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`.

  Args:
    input: A `Tensor`. Must be one of the following types: `bool`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`.
      Values to compare against `threshold` and bitpack.
    threshold: A `Tensor`. Must have the same type as `input`.
      Threshold to compare against.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `uint8`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a "CompareAndBitpack" node via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "CompareAndBitpack", input=input, threshold=threshold, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "CompareAndBitpack", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: fixed positional protocol of TFE_Py_FastPathExecute.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "CompareAndBitpack", name, _ctx._post_execution_callbacks, input,
        threshold)
      return _result
    except _core._FallbackException:
      # Inputs need Python-side conversion; retry via the slow path.
      return compare_and_bitpack_eager_fallback(
          input, threshold, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1898 
1899 
def compare_and_bitpack_eager_fallback(input, threshold, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function compare_and_bitpack

  Converts `input`/`threshold` to one matching dtype (the "T" attr) and
  executes "CompareAndBitpack" through the generic eager executor.
  """
  _ctx = ctx if ctx else _context.context()
  # Both inputs must share one dtype per the op definition.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, threshold], _ctx)
  (input, threshold) = _inputs_T
  _inputs_flat = [input, threshold]
  _attrs = ("T", _attr_T)
  # 1 == number of outputs declared by the op.
  _result = _execute.execute(b"CompareAndBitpack", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "CompareAndBitpack", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
1915 
1916 
def _complex(real, imag, Tout=_dtypes.complex64, name=None):
  r"""Converts two real numbers to a complex number.

  Given a tensor `real` representing the real part of a complex number, and a
  tensor `imag` representing the imaginary part of a complex number, this
  operation returns complex numbers elementwise of the form \\(a + bj\\), where
  *a* represents the `real` part and *b* represents the `imag` part.

  The input tensors `real` and `imag` must have the same shape.

  For example:

  ```
  # tensor 'real' is [2.25, 3.25]
  # tensor `imag` is [4.75, 5.75]
  tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
  ```

  Args:
    real: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    imag: A `Tensor`. Must have the same type as `real`.
    Tout: An optional `tf.DType` from: `tf.complex64, tf.complex128`. Defaults to `tf.complex64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `Tout`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize the Tout attr, then build a "Complex" node.
    if Tout is None:
      Tout = _dtypes.complex64
    Tout = _execute.make_type(Tout, "Tout")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Complex", real=real, imag=imag, Tout=Tout, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout"))
    _execute.record_gradient(
      "Complex", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: fixed positional protocol of TFE_Py_FastPathExecute.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Complex",
        name, _ctx._post_execution_callbacks, real, imag, "Tout", Tout)
      return _result
    except _core._FallbackException:
      # Inputs need Python-side conversion; retry via the slow path.
      return _complex_eager_fallback(
          real, imag, Tout=Tout, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
1974 
1975 
def _complex_eager_fallback(real, imag, Tout=_dtypes.complex64, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function _complex

  Normalizes the Tout attr, converts `real`/`imag` to one matching dtype
  (defaulting to float32), and executes "Complex" through the generic
  eager executor.
  """
  _ctx = ctx if ctx else _context.context()
  if Tout is None:
    Tout = _dtypes.complex64
  Tout = _execute.make_type(Tout, "Tout")
  # float32 is the default inference type when neither input pins a dtype.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([real, imag], _ctx, _dtypes.float32)
  (real, imag) = _inputs_T
  _inputs_flat = [real, imag]
  _attrs = ("T", _attr_T, "Tout", Tout)
  # 1 == number of outputs declared by the op.
  _result = _execute.execute(b"Complex", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Complex", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
1994 
1995 
def complex_abs(x, Tout=_dtypes.float32, name=None):
  r"""Computes the complex absolute value of a tensor.

  Given a tensor `x` of complex numbers, this operation returns a tensor of type
  `float` or `double` that is the absolute value of each element in `x`. All
  elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute
  value is computed as \\( \sqrt{a^2 + b^2}\\).

  Args:
    x: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
    Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `Tout`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize the Tout attr, then build a "ComplexAbs" node.
    if Tout is None:
      Tout = _dtypes.float32
    Tout = _execute.make_type(Tout, "Tout")
    _, _, _op = _op_def_lib._apply_op_helper(
        "ComplexAbs", x=x, Tout=Tout, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout"))
    _execute.record_gradient(
      "ComplexAbs", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: fixed positional protocol of TFE_Py_FastPathExecute.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "ComplexAbs",
        name, _ctx._post_execution_callbacks, x, "Tout", Tout)
      return _result
    except _core._FallbackException:
      # Input needs Python-side conversion; retry via the slow path.
      return complex_abs_eager_fallback(
          x, Tout=Tout, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2042 
2043 
def complex_abs_eager_fallback(x, Tout=_dtypes.float32, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function complex_abs

  Normalizes the Tout attr, infers "T" from `x` (defaulting to complex64),
  and executes "ComplexAbs" through the generic eager executor.
  """
  _ctx = ctx if ctx else _context.context()
  if Tout is None:
    Tout = _dtypes.float32
  Tout = _execute.make_type(Tout, "Tout")
  # complex64 is the default inference type when `x` does not pin a dtype.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx, _dtypes.complex64)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T, "Tout", Tout)
  # 1 == number of outputs declared by the op.
  _result = _execute.execute(b"ComplexAbs", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "ComplexAbs", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
2061 
2062 
def conj(input, name=None):
  r"""Returns the complex conjugate of a complex number.

  Given a tensor `input` of complex numbers, this operation returns a tensor of
  complex numbers that are the complex conjugate of each element in `input`. The
  complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
  real part and *b* is the imaginary part.

  The complex conjugate returned by this operation is of the form \\(a - bj\\).

  For example:

  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`, `variant`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a "Conj" node via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Conj", input=input, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Conj", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: fixed positional protocol of TFE_Py_FastPathExecute.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Conj", name,
        _ctx._post_execution_callbacks, input)
      return _result
    except _core._FallbackException:
      # Input needs Python-side conversion; retry via the slow path.
      return conj_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2114 
2115 
def conj_eager_fallback(input, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function conj

  Infers the "T" attr from `input` (defaulting to complex64) and executes
  "Conj" through the generic eager executor.
  """
  _ctx = ctx if ctx else _context.context()
  # complex64 is the default inference type when `input` does not pin a dtype.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx, _dtypes.complex64)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T)
  # 1 == number of outputs declared by the op.
  _result = _execute.execute(b"Conj", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Conj", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
2130 
2131 
@tf_export('math.cos', 'cos')
@deprecated_endpoints('cos')
def cos(x, name=None):
  r"""Computes cos of x element-wise.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a "Cos" node via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Cos", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Cos", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: fixed positional protocol of TFE_Py_FastPathExecute.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Cos", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Input needs Python-side conversion; retry via the slow path.
      return cos_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2171 
2172 
def cos_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function cos

  Infers the "T" attr from `x` and executes "Cos" through the generic
  eager executor.
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  # 1 == number of outputs declared by the op.
  _result = _execute.execute(b"Cos", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Cos", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
2187 
2188 
@tf_export('math.cosh', 'cosh')
@deprecated_endpoints('cosh')
def cosh(x, name=None):
  r"""Computes hyperbolic cosine of x element-wise.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a "Cosh" node via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Cosh", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Cosh", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: fixed positional protocol of TFE_Py_FastPathExecute.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Cosh", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Input needs Python-side conversion; retry via the slow path.
      return cosh_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2228 
2229 
def cosh_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function cosh

  Infers the "T" attr from `x` and executes "Cosh" through the generic
  eager executor.
  """
  _ctx = ctx if ctx else _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  # 1 == number of outputs declared by the op.
  _result = _execute.execute(b"Cosh", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Cosh", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
2244 
2245 
@tf_export('linalg.cross', 'cross')
@deprecated_endpoints('cross')
def cross(a, b, name=None):
  r"""Compute the pairwise cross product.

  `a` and `b` must be the same shape; they can either be simple 3-element vectors,
  or any shape where the innermost dimension is 3. In the latter case, each pair
  of corresponding 3-element vectors is cross-multiplied independently.

  Args:
    a: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      A tensor containing 3-element vectors.
    b: A `Tensor`. Must have the same type as `a`.
      Another tensor, of same type and shape as `a`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `a`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a "Cross" node via the op-def library.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Cross", a=a, b=b, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Cross", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: fixed positional protocol of TFE_Py_FastPathExecute.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Cross", name,
        _ctx._post_execution_callbacks, a, b)
      return _result
    except _core._FallbackException:
      # Inputs need Python-side conversion; retry via the slow path.
      return cross_eager_fallback(
          a, b, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2292 
2293 
def cross_eager_fallback(a, b, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function cross

  Converts `a`/`b` to one matching dtype (the "T" attr) and executes
  "Cross" through the generic eager executor.
  """
  _ctx = ctx if ctx else _context.context()
  # Both inputs must share one dtype per the op definition.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([a, b], _ctx)
  (a, b) = _inputs_T
  _inputs_flat = [a, b]
  _attrs = ("T", _attr_T)
  # 1 == number of outputs declared by the op.
  _result = _execute.execute(b"Cross", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Cross", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
2309 
2310 
def cumprod(x, axis, exclusive=False, reverse=False, name=None):
  r"""Compute the cumulative product of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumprod, which means that the first
  element of the input is identical to the first element of the output:

  ```python
  tf.cumprod([a, b, c])  # => [a, a * b, a * b * c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
  performed instead:

  ```python
  tf.cumprod([a, b, c], exclusive=True)  # => [1, a, a * b]
  ```

  By setting the `reverse` kwarg to `True`, the cumprod is performed in the
  opposite direction:

  ```python
  tf.cumprod([a, b, c], reverse=True)  # => [a * b * c, b * c, c]
  ```

  This is more efficient than using separate `tf.reverse` ops.

  The `reverse` and `exclusive` kwargs can also be combined:

  ```python
  tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: An optional `bool`. Defaults to `False`.
      If `True`, perform exclusive cumprod.
    reverse: An optional `bool`. Defaults to `False`.
      A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize the optional bool attrs, then build a graph op.
    if exclusive is None:
      exclusive = False
    exclusive = _execute.make_bool(exclusive, "exclusive")
    if reverse is None:
      reverse = False
    reverse = _execute.make_bool(reverse, "reverse")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Cumprod", x=x, axis=axis, exclusive=exclusive, reverse=reverse,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attrs are flattened to (name, value) pairs for gradient recording.
    _attrs = ("exclusive", _op.get_attr("exclusive"), "reverse",
              _op.get_attr("reverse"), "T", _op.get_attr("T"), "Tidx",
              _op.get_attr("Tidx"))
    _execute.record_gradient(
      "Cumprod", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Cumprod",
        name, _ctx._post_execution_callbacks, x, axis, "exclusive", exclusive,
        "reverse", reverse)
      return _result
    except _core._FallbackException:
      # Inputs were not eligible for the fast path; use the Python slow path.
      return cumprod_eager_fallback(
          x, axis, exclusive=exclusive, reverse=reverse, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2397 
2398 
def cumprod_eager_fallback(x, axis, exclusive=False, reverse=False, name=None, ctx=None):
  r"""Eager-mode slow path for `cumprod`.

  Invoked when the fast C dispatch for the "Cumprod" op is unavailable.
  """
  _ctx = ctx or _context.context()
  # Normalize the optional boolean attributes.
  exclusive = _execute.make_bool(
      False if exclusive is None else exclusive, "exclusive")
  reverse = _execute.make_bool(
      False if reverse is None else reverse, "reverse")
  # Infer the value dtype T and the index dtype Tidx (defaulting to int32).
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx,
                                                        _dtypes.int32)
  flat_inputs = [x, axis]
  op_attrs = ("exclusive", exclusive, "reverse", reverse, "T", _attr_T,
              "Tidx", _attr_Tidx)
  outputs = _execute.execute(b"Cumprod", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("Cumprod", flat_inputs, op_attrs, outputs, name)
  return outputs[0]
2421 
2422 
def cumsum(x, axis, exclusive=False, reverse=False, name=None):
  r"""Compute the cumulative sum of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumsum, which means that the first
  element of the input is identical to the first element of the output:

  ```python
  tf.cumsum([a, b, c])  # => [a, a + b, a + b + c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumsum is
  performed instead:

  ```python
  tf.cumsum([a, b, c], exclusive=True)  # => [0, a, a + b]
  ```

  By setting the `reverse` kwarg to `True`, the cumsum is performed in the
  opposite direction:

  ```python
  tf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]
  ```

  This is more efficient than using separate `tf.reverse` ops.

  The `reverse` and `exclusive` kwargs can also be combined:

  ```python
  tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: An optional `bool`. Defaults to `False`.
      If `True`, perform exclusive cumsum.
    reverse: An optional `bool`. Defaults to `False`.
      A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize the optional bool attrs, then build a graph op.
    if exclusive is None:
      exclusive = False
    exclusive = _execute.make_bool(exclusive, "exclusive")
    if reverse is None:
      reverse = False
    reverse = _execute.make_bool(reverse, "reverse")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Cumsum", x=x, axis=axis, exclusive=exclusive, reverse=reverse,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    # Attrs are flattened to (name, value) pairs for gradient recording.
    _attrs = ("exclusive", _op.get_attr("exclusive"), "reverse",
              _op.get_attr("reverse"), "T", _op.get_attr("T"), "Tidx",
              _op.get_attr("Tidx"))
    _execute.record_gradient(
      "Cumsum", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Cumsum", name,
        _ctx._post_execution_callbacks, x, axis, "exclusive", exclusive,
        "reverse", reverse)
      return _result
    except _core._FallbackException:
      # Inputs were not eligible for the fast path; use the Python slow path.
      return cumsum_eager_fallback(
          x, axis, exclusive=exclusive, reverse=reverse, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2509 
2510 
def cumsum_eager_fallback(x, axis, exclusive=False, reverse=False, name=None, ctx=None):
  r"""Eager-mode slow path for `cumsum`.

  Invoked when the fast C dispatch for the "Cumsum" op is unavailable.
  """
  _ctx = ctx or _context.context()
  # Normalize the optional boolean attributes.
  exclusive = _execute.make_bool(
      False if exclusive is None else exclusive, "exclusive")
  reverse = _execute.make_bool(
      False if reverse is None else reverse, "reverse")
  # Infer the value dtype T and the index dtype Tidx (defaulting to int32).
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx,
                                                        _dtypes.int32)
  flat_inputs = [x, axis]
  op_attrs = ("exclusive", exclusive, "reverse", reverse, "T", _attr_T,
              "Tidx", _attr_Tidx)
  outputs = _execute.execute(b"Cumsum", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("Cumsum", flat_inputs, op_attrs, outputs, name)
  return outputs[0]
2533 
2534 
@tf_export('math.digamma', 'digamma')
@deprecated_endpoints('digamma')
def digamma(x, name=None):
  r"""Computes Psi, the derivative of Lgamma (the log of the absolute value of

  `Gamma(x)`), element-wise.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a "Digamma" op to the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Digamma", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Digamma", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Digamma",
        name, _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Inputs were not eligible for the fast path; use the Python slow path.
      return digamma_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2576 
2577 
def digamma_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for `digamma`.

  Invoked when the fast C dispatch for the "Digamma" op is unavailable.
  """
  _ctx = ctx or _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  flat_inputs = [x]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"Digamma", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("Digamma", flat_inputs, op_attrs, outputs, name)
  return outputs[0]
2592 
2593 
def div(x, y, name=None):
  r"""Returns x / y element-wise.

  *NOTE*: `Div` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a "Div" op to the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Div", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Div", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Div", name,
        _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Inputs were not eligible for the fast path; use the Python slow path.
      return div_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2635 
2636 
def div_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for `div`.

  Invoked when the fast C dispatch for the "Div" op is unavailable.
  """
  _ctx = ctx or _context.context()
  # Coerce both operands to a single matching dtype T.
  _attr_T, (x, y) = _execute.args_to_matching_eager([x, y], _ctx)
  flat_inputs = [x, y]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"Div", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Div", flat_inputs, op_attrs, outputs, name)
  return outputs[0]
2652 
2653 
def div_no_nan(x, y, name=None):
  r"""Returns 0 if the denominator is zero.

  
  *NOTE*: `DivNoNan` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a "DivNoNan" op to the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "DivNoNan", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "DivNoNan", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "DivNoNan",
        name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Inputs were not eligible for the fast path; use the Python slow path.
      return div_no_nan_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2696 
2697 
def div_no_nan_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for `div_no_nan`.

  Invoked when the fast C dispatch for the "DivNoNan" op is unavailable.
  """
  _ctx = ctx or _context.context()
  # Coerce both operands to a single matching dtype T.
  _attr_T, (x, y) = _execute.args_to_matching_eager([x, y], _ctx)
  flat_inputs = [x, y]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"DivNoNan", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("DivNoNan", flat_inputs, op_attrs, outputs, name)
  return outputs[0]
2713 
2714 
@tf_export('math.equal', 'equal')
@deprecated_endpoints('equal')
def equal(x, y, name=None):
  r"""Returns the truth value of (x == y) element-wise.

  *NOTE*: `math.equal` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `complex64`, `quint8`, `qint8`, `qint32`, `string`, `bool`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add an "Equal" op to the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Equal", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Equal", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Equal", name,
        _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Inputs were not eligible for the fast path; use the Python slow path.
      return equal_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2758 
2759 
def equal_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for `equal`.

  Invoked when the fast C dispatch for the "Equal" op is unavailable.
  """
  _ctx = ctx or _context.context()
  # Coerce both operands to a single matching dtype T.
  _attr_T, (x, y) = _execute.args_to_matching_eager([x, y], _ctx)
  flat_inputs = [x, y]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"Equal", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Equal", flat_inputs, op_attrs, outputs, name)
  return outputs[0]
2775 
2776 
def erf(x, name=None):
  r"""Computes the Gauss error function of `x` element-wise.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add an "Erf" op to the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Erf", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Erf", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Erf", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Inputs were not eligible for the fast path; use the Python slow path.
      return erf_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2814 
2815 
def erf_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for `erf`.

  Invoked when the fast C dispatch for the "Erf" op is unavailable.
  """
  _ctx = ctx or _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  flat_inputs = [x]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"Erf", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Erf", flat_inputs, op_attrs, outputs, name)
  return outputs[0]
2830 
2831 
@tf_export('math.erfc', 'erfc')
@deprecated_endpoints('erfc')
def erfc(x, name=None):
  r"""Computes the complementary error function of `x` element-wise.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add an "Erfc" op to the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Erfc", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Erfc", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Erfc", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Inputs were not eligible for the fast path; use the Python slow path.
      return erfc_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2871 
2872 
def erfc_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for `erfc`.

  Invoked when the fast C dispatch for the "Erfc" op is unavailable.
  """
  _ctx = ctx or _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  flat_inputs = [x]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"Erfc", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Erfc", flat_inputs, op_attrs, outputs, name)
  return outputs[0]
2887 
2888 
@tf_export('math.exp', 'exp')
@deprecated_endpoints('exp')
def exp(x, name=None):
  r"""Computes exponential of x element-wise.  \\(y = e^x\\).

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add an "Exp" op to the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Exp", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Exp", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Exp", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Inputs were not eligible for the fast path; use the Python slow path.
      return exp_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2928 
2929 
def exp_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for `exp`.

  Invoked when the fast C dispatch for the "Exp" op is unavailable.
  """
  _ctx = ctx or _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  flat_inputs = [x]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"Exp", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Exp", flat_inputs, op_attrs, outputs, name)
  return outputs[0]
2944 
2945 
@tf_export('math.expm1', 'expm1')
@deprecated_endpoints('expm1')
def expm1(x, name=None):
  r"""Computes exponential of x - 1 element-wise.

  I.e., \\(y = (\exp x) - 1\\).

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add an "Expm1" op to the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Expm1", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Expm1", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Expm1", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Inputs were not eligible for the fast path; use the Python slow path.
      return expm1_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
2987 
2988 
def expm1_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for `expm1`.

  Invoked when the fast C dispatch for the "Expm1" op is unavailable.
  """
  _ctx = ctx or _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  flat_inputs = [x]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"Expm1", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Expm1", flat_inputs, op_attrs, outputs, name)
  return outputs[0]
3003 
3004 
@tf_export('math.floor', 'floor')
@deprecated_endpoints('floor')
def floor(x, name=None):
  r"""Returns element-wise largest integer not greater than x.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a "Floor" op to the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Floor", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Floor", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Floor", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Inputs were not eligible for the fast path; use the Python slow path.
      return floor_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3044 
3045 
def floor_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for `floor`.

  Invoked when the fast C dispatch for the "Floor" op is unavailable.
  """
  _ctx = ctx or _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  flat_inputs = [x]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"Floor", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Floor", flat_inputs, op_attrs, outputs, name)
  return outputs[0]
3060 
3061 
def floor_div(x, y, name=None):
  r"""Returns x // y element-wise.

  *NOTE*: `FloorDiv` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a "FloorDiv" op to the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "FloorDiv", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "FloorDiv", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "FloorDiv",
        name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Inputs were not eligible for the fast path; use the Python slow path.
      return floor_div_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3103 
3104 
def floor_div_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for `floor_div`.

  Invoked when the fast C dispatch for the "FloorDiv" op is unavailable.
  """
  _ctx = ctx or _context.context()
  # Coerce both operands to a single matching dtype T.
  _attr_T, (x, y) = _execute.args_to_matching_eager([x, y], _ctx)
  flat_inputs = [x, y]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"FloorDiv", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("FloorDiv", flat_inputs, op_attrs, outputs, name)
  return outputs[0]
3120 
3121 
def floor_mod(x, y, name=None):
  r"""Returns element-wise remainder of division. When `x < 0` xor `y < 0` is

  true, this follows Python semantics in that the result here is consistent
  with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`.

  *NOTE*: `FloorMod` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `bfloat16`, `half`, `float32`, `float64`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a "FloorMod" op to the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "FloorMod", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "FloorMod", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C extension.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "FloorMod",
        name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Inputs were not eligible for the fast path; use the Python slow path.
      return floor_mod_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ status as the corresponding Python exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3166 
3167 
def floor_mod_eager_fallback(x, y, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function floor_mod
  """
  _ctx = ctx if ctx else _context.context()
  # Coerce both inputs to one matching dtype for the "T" attr.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
  (x, y) = _inputs_T
  _inputs_flat = [x, y]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"FloorMod", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "FloorMod", _inputs_flat, _attrs, _result, name)
  _result, = _result  # Single-output op: unwrap the one-element list.
  return _result
3183 
3184 
@tf_export('math.greater', 'greater')
@deprecated_endpoints('greater')
def greater(x, y, name=None):
  r"""Returns the truth value of (x > y) element-wise.

  *NOTE*: `math.greater` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: record a "Greater" op in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Greater", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Greater", _inputs_flat, _attrs, _result, name)
    _result, = _result  # Single-output op: unwrap the one-element list.
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Greater",
        name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return greater_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C++ error status as the matching Python exception.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3228 
3229 
def greater_eager_fallback(x, y, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function greater
  """
  _ctx = ctx if ctx else _context.context()
  # Coerce both inputs to one matching dtype for the "T" attr.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
  (x, y) = _inputs_T
  _inputs_flat = [x, y]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Greater", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Greater", _inputs_flat, _attrs, _result, name)
  _result, = _result  # Single-output op: unwrap the one-element list.
  return _result
3245 
3246 
@tf_export('math.greater_equal', 'greater_equal')
@deprecated_endpoints('greater_equal')
def greater_equal(x, y, name=None):
  r"""Returns the truth value of (x >= y) element-wise.

  *NOTE*: `math.greater_equal` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: record a "GreaterEqual" op in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "GreaterEqual", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "GreaterEqual", _inputs_flat, _attrs, _result, name)
    _result, = _result  # Single-output op: unwrap the one-element list.
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "GreaterEqual",
        name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return greater_equal_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C++ error status as the matching Python exception.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3290 
3291 
def greater_equal_eager_fallback(x, y, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function greater_equal
  """
  _ctx = ctx if ctx else _context.context()
  # Coerce both inputs to one matching dtype for the "T" attr.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], _ctx)
  (x, y) = _inputs_T
  _inputs_flat = [x, y]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"GreaterEqual", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "GreaterEqual", _inputs_flat, _attrs, _result, name)
  _result, = _result  # Single-output op: unwrap the one-element list.
  return _result
3307 
3308 
def _histogram_fixed_width(values, value_range, nbins, dtype=_dtypes.int32, name=None):
  r"""Return histogram of values.

  Given the tensor `values`, this operation returns a rank 1 histogram counting
  the number of entries in `values` that fall into every bin.  The bins are
  equal width and determined by the arguments `value_range` and `nbins`.

  ```python
  # Bins will be:  (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
  nbins = 5
  value_range = [0.0, 5.0]
  new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]

  with tf.get_default_session() as sess:
    hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
    variables.global_variables_initializer().run()
    sess.run(hist) => [2, 1, 1, 0, 2]
  ```

  Args:
    values: A `Tensor`. Must be one of the following types: `int32`, `int64`, `float32`, `float64`.
      Numeric `Tensor`.
    value_range: A `Tensor`. Must have the same type as `values`.
      Shape [2] `Tensor` of same `dtype` as `values`.
      values <= value_range[0] will be mapped to hist[0],
      values >= value_range[1] will be mapped to hist[-1].
    nbins: A `Tensor` of type `int32`.
      Scalar `int32 Tensor`.  Number of histogram bins.
    dtype: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize the dtype attr, then record the op.
    if dtype is None:
      dtype = _dtypes.int32
    dtype = _execute.make_type(dtype, "dtype")
    _, _, _op = _op_def_lib._apply_op_helper(
        "HistogramFixedWidth", values=values, value_range=value_range,
        nbins=nbins, dtype=dtype, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "dtype", _op.get_attr("dtype"))
    _execute.record_gradient(
      "HistogramFixedWidth", _inputs_flat, _attrs, _result, name)
    _result, = _result  # Single-output op: unwrap the one-element list.
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "HistogramFixedWidth", name, _ctx._post_execution_callbacks, values,
        value_range, nbins, "dtype", dtype)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return _histogram_fixed_width_eager_fallback(
          values, value_range, nbins, dtype=dtype, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C++ error status as the matching Python exception.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3375 
3376 
def _histogram_fixed_width_eager_fallback(values, value_range, nbins, dtype=_dtypes.int32, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function _histogram_fixed_width
  """
  _ctx = ctx if ctx else _context.context()
  if dtype is None:
    dtype = _dtypes.int32
  dtype = _execute.make_type(dtype, "dtype")
  # Coerce values and value_range to one matching dtype for the "T" attr.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([values, value_range], _ctx)
  (values, value_range) = _inputs_T
  # nbins is always an int32 scalar per the op definition.
  nbins = _ops.convert_to_tensor(nbins, _dtypes.int32)
  _inputs_flat = [values, value_range, nbins]
  _attrs = ("T", _attr_T, "dtype", dtype)
  _result = _execute.execute(b"HistogramFixedWidth", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "HistogramFixedWidth", _inputs_flat, _attrs, _result, name)
  _result, = _result  # Single-output op: unwrap the one-element list.
  return _result
3396 
3397 
@tf_export('math.igamma', 'igamma')
@deprecated_endpoints('igamma')
def igamma(a, x, name=None):
  r"""Compute the lower regularized incomplete Gamma function `P(a, x)`.

  The lower regularized incomplete Gamma function is defined as:


  \\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\)

  where

  \\(gamma(a, x) = \\int_{0}^{x} t^{a-1} exp(-t) dt\\)

  is the lower incomplete Gamma function.

  Note, above `Q(a, x)` (`Igammac`) is the upper regularized complete
  Gamma function.

  Args:
    a: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    x: A `Tensor`. Must have the same type as `a`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `a`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: record an "Igamma" op in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Igamma", a=a, x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Igamma", _inputs_flat, _attrs, _result, name)
    _result, = _result  # Single-output op: unwrap the one-element list.
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Igamma", name,
        _ctx._post_execution_callbacks, a, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return igamma_eager_fallback(
          a, x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C++ error status as the matching Python exception.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3452 
3453 
def igamma_eager_fallback(a, x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function igamma
  """
  _ctx = ctx if ctx else _context.context()
  # Coerce both inputs to one matching dtype for the "T" attr.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([a, x], _ctx)
  (a, x) = _inputs_T
  _inputs_flat = [a, x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Igamma", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Igamma", _inputs_flat, _attrs, _result, name)
  _result, = _result  # Single-output op: unwrap the one-element list.
  return _result
3469 
3470 
def igamma_grad_a(a, x, name=None):
  r"""Computes the gradient of `igamma(a, x)` wrt `a`.

  Args:
    a: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    x: A `Tensor`. Must have the same type as `a`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `a`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: record an "IgammaGradA" op in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "IgammaGradA", a=a, x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "IgammaGradA", _inputs_flat, _attrs, _result, name)
    _result, = _result  # Single-output op: unwrap the one-element list.
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "IgammaGradA",
        name, _ctx._post_execution_callbacks, a, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return igamma_grad_a_eager_fallback(
          a, x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C++ error status as the matching Python exception.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3509 
3510 
def igamma_grad_a_eager_fallback(a, x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function igamma_grad_a
  """
  _ctx = ctx if ctx else _context.context()
  # Coerce both inputs to one matching dtype for the "T" attr.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([a, x], _ctx)
  (a, x) = _inputs_T
  _inputs_flat = [a, x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"IgammaGradA", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "IgammaGradA", _inputs_flat, _attrs, _result, name)
  _result, = _result  # Single-output op: unwrap the one-element list.
  return _result
3526 
3527 
@tf_export('math.igammac', 'igammac')
@deprecated_endpoints('igammac')
def igammac(a, x, name=None):
  r"""Compute the upper regularized incomplete Gamma function `Q(a, x)`.

  The upper regularized incomplete Gamma function is defined as:

  \\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\)

  where

  \\(Gamma(a, x) = \\int_{x}^{\\infty} t^{a-1} exp(-t) dt\\)

  is the upper incomplete Gamma function.

  Note, above `P(a, x)` (`Igamma`) is the lower regularized complete
  Gamma function.

  Args:
    a: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    x: A `Tensor`. Must have the same type as `a`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `a`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: record an "Igammac" op in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Igammac", a=a, x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Igammac", _inputs_flat, _attrs, _result, name)
    _result, = _result  # Single-output op: unwrap the one-element list.
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Igammac",
        name, _ctx._post_execution_callbacks, a, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return igammac_eager_fallback(
          a, x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C++ error status as the matching Python exception.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3581 
3582 
def igammac_eager_fallback(a, x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function igammac
  """
  _ctx = ctx if ctx else _context.context()
  # Coerce both inputs to one matching dtype for the "T" attr.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([a, x], _ctx)
  (a, x) = _inputs_T
  _inputs_flat = [a, x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Igammac", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Igammac", _inputs_flat, _attrs, _result, name)
  _result, = _result  # Single-output op: unwrap the one-element list.
  return _result
3598 
3599 
def imag(input, Tout=_dtypes.float32, name=None):
  r"""Returns the imaginary part of a complex number.

  Given a tensor `input` of complex numbers, this operation returns a tensor of
  type `float` that is the imaginary part of each element in `input`. All
  elements in `input` must be complex numbers of the form \\(a + bj\\), where *a*
  is the real part and *b* is the imaginary part returned by this operation.

  For example:

  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.imag(input) ==> [4.75, 5.75]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
    Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `Tout`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize the Tout attr, then record the op.
    if Tout is None:
      Tout = _dtypes.float32
    Tout = _execute.make_type(Tout, "Tout")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Imag", input=input, Tout=Tout, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout"))
    _execute.record_gradient(
      "Imag", _inputs_flat, _attrs, _result, name)
    _result, = _result  # Single-output op: unwrap the one-element list.
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Imag", name,
        _ctx._post_execution_callbacks, input, "Tout", Tout)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return imag_eager_fallback(
          input, Tout=Tout, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C++ error status as the matching Python exception.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3653 
3654 
def imag_eager_fallback(input, Tout=_dtypes.float32, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function imag
  """
  _ctx = ctx if ctx else _context.context()
  if Tout is None:
    Tout = _dtypes.float32
  Tout = _execute.make_type(Tout, "Tout")
  # Infer the "T" attr from input, defaulting to complex64.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx, _dtypes.complex64)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "Tout", Tout)
  _result = _execute.execute(b"Imag", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Imag", _inputs_flat, _attrs, _result, name)
  _result, = _result  # Single-output op: unwrap the one-element list.
  return _result
3672 
3673 
def inv(x, name=None):
  r"""Computes the reciprocal of x element-wise.

  I.e., \\(y = 1 / x\\).

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: record an "Inv" op in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Inv", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Inv", _inputs_flat, _attrs, _result, name)
    _result, = _result  # Single-output op: unwrap the one-element list.
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Inv", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return inv_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C++ error status as the matching Python exception.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3713 
3714 
def inv_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function inv
  """
  _ctx = ctx if ctx else _context.context()
  # Infer the "T" attr from x's dtype.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Inv", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Inv", _inputs_flat, _attrs, _result, name)
  _result, = _result  # Single-output op: unwrap the one-element list.
  return _result
3729 
3730 
def inv_grad(y, dy, name=None):
  r"""Computes the gradient for the inverse of `x` wrt its input.

  Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
  is the corresponding input gradient.

  Args:
    y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    dy: A `Tensor`. Must have the same type as `y`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `y`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: record an "InvGrad" op in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "InvGrad", y=y, dy=dy, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "InvGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result  # Single-output op: unwrap the one-element list.
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "InvGrad",
        name, _ctx._post_execution_callbacks, y, dy)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return inv_grad_eager_fallback(
          y, dy, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C++ error status as the matching Python exception.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3772 
3773 
def inv_grad_eager_fallback(y, dy, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function inv_grad
  """
  _ctx = ctx if ctx else _context.context()
  # Coerce both inputs to one matching dtype for the "T" attr.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], _ctx)
  (y, dy) = _inputs_T
  _inputs_flat = [y, dy]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"InvGrad", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "InvGrad", _inputs_flat, _attrs, _result, name)
  _result, = _result  # Single-output op: unwrap the one-element list.
  return _result
3789 
3790 
@tf_export('debugging.is_finite', 'is_finite')
@deprecated_endpoints('is_finite')
def is_finite(x, name=None):
  r"""Returns which elements of x are finite.

  @compatibility(numpy)
  Equivalent to np.isfinite
  @end_compatibility

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: record an "IsFinite" op in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "IsFinite", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "IsFinite", _inputs_flat, _attrs, _result, name)
    _result, = _result  # Single-output op: unwrap the one-element list.
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "IsFinite",
        name, _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return is_finite_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C++ error status as the matching Python exception.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3834 
3835 
def is_finite_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function is_finite
  """
  _ctx = ctx if ctx else _context.context()
  # Infer the "T" attr from x's dtype.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"IsFinite", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "IsFinite", _inputs_flat, _attrs, _result, name)
  _result, = _result  # Single-output op: unwrap the one-element list.
  return _result
3850 
3851 
@tf_export('debugging.is_inf', 'is_inf')
@deprecated_endpoints('is_inf')
def is_inf(x, name=None):
  r"""Returns which elements of x are Inf.

  @compatibility(numpy)
  Equivalent to np.isinf
  @end_compatibility

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: record an "IsInf" op in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "IsInf", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "IsInf", _inputs_flat, _attrs, _result, name)
    _result, = _result  # Single-output op: unwrap the one-element list.
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "IsInf", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return is_inf_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C++ error status as the matching Python exception.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3895 
3896 
def is_inf_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function is_inf
  """
  _ctx = ctx if ctx else _context.context()
  # Infer the "T" attr from x's dtype.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"IsInf", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "IsInf", _inputs_flat, _attrs, _result, name)
  _result, = _result  # Single-output op: unwrap the one-element list.
  return _result
3911 
3912 
@tf_export('debugging.is_nan', 'is_nan')
@deprecated_endpoints('is_nan')
def is_nan(x, name=None):
  r"""Returns which elements of x are NaN.

  @compatibility(numpy)
  Equivalent to np.isnan
  @end_compatibility

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: record an "IsNan" op in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "IsNan", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "IsNan", _inputs_flat, _attrs, _result, name)
    _result, = _result  # Single-output op: unwrap the one-element list.
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "IsNan", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return is_nan_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C++ error status as the matching Python exception.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
3956 
3957 
def is_nan_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function is_nan
  """
  _ctx = ctx if ctx else _context.context()
  # Infer the "T" attr from x's dtype.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"IsNan", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "IsNan", _inputs_flat, _attrs, _result, name)
  _result, = _result  # Single-output op: unwrap the one-element list.
  return _result
3972 
3973 
@tf_export('math.less', 'less')
@deprecated_endpoints('less')
def less(x, y, name=None):
  r"""Returns the truth value of (x < y) element-wise.

  *NOTE*: `math.less` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: record a "Less" op in the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Less", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Less", _inputs_flat, _attrs, _result, name)
    _result, = _result  # Single-output op: unwrap the one-element list.
    return _result

  else:
    try:
      # Eager mode: try the C++ fast-path execution first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Less", name,
        _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return less_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      # Surface the C++ error status as the matching Python exception.
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4017 
4018 
def less_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for the `Less` op.

  Coerces `x` and `y` to a common supported dtype, runs the `Less`
  kernel through the generic executor, records the gradient, and returns
  the single boolean output tensor.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x, y) = _execute.args_to_matching_eager([x, y], eager_ctx)
  flat_inputs = [x, y]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Less", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Less", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
4034 
4035 
@tf_export('math.less_equal', 'less_equal')
@deprecated_endpoints('less_equal')
def less_equal(x, y, name=None):
  r"""Returns the truth value of (x <= y) element-wise.

  *NOTE*: `math.less_equal` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a "LessEqual" node and record the gradient against
    # its symbolic inputs and outputs.
    _, _, _op = _op_def_lib._apply_op_helper(
        "LessEqual", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "LessEqual", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C-level fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "LessEqual",
        name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle fall back to the Python slow path.
      return less_equal_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as a Python exception, tagging the
      # op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4079 
4080 
def less_equal_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for the `LessEqual` op.

  Coerces `x` and `y` to a common supported dtype, runs the `LessEqual`
  kernel through the generic executor, records the gradient, and returns
  the single boolean output tensor.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x, y) = _execute.args_to_matching_eager([x, y], eager_ctx)
  flat_inputs = [x, y]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"LessEqual", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("LessEqual", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
4096 
4097 
@tf_export('math.lgamma', 'lgamma')
@deprecated_endpoints('lgamma')
def lgamma(x, name=None):
  r"""Computes the log of the absolute value of `Gamma(x)` element-wise.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a "Lgamma" node and record the gradient against
    # its symbolic inputs and outputs.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Lgamma", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Lgamma", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C-level fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Lgamma", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle fall back to the Python slow path.
      return lgamma_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as a Python exception, tagging the
      # op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4137 
4138 
def lgamma_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for the `Lgamma` op.

  Coerces `x` to an eager tensor of a supported dtype, runs the `Lgamma`
  kernel through the generic executor, records the gradient, and returns
  the single output tensor.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Lgamma", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Lgamma", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
4153 
4154 
@tf_export('lin_space', 'linspace')
def lin_space(start, stop, num, name=None):
  r"""Generates values in an interval.

  A sequence of `num` evenly-spaced values are generated beginning at `start`.
  If `num > 1`, the values in the sequence increase by
  `(stop - start) / (num - 1)`, so that the last one is exactly `stop`.

  For example:

  ```
  tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
  ```

  Args:
    start: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `float64`.
      0-D tensor. First entry in the range.
    stop: A `Tensor`. Must have the same type as `start`.
      0-D tensor. Last entry in the range.
    num: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      0-D tensor. Number of values to generate.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `start`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a "LinSpace" node and record the gradient against
    # its symbolic inputs and outputs.
    _, _, _op = _op_def_lib._apply_op_helper(
        "LinSpace", start=start, stop=stop, num=num, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "LinSpace", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C-level fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "LinSpace",
        name, _ctx._post_execution_callbacks, start, stop, num)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle fall back to the Python slow path.
      return lin_space_eager_fallback(
          start, stop, num, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as a Python exception, tagging the
      # op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4208 
4209 
def lin_space_eager_fallback(start, stop, num, name=None, ctx=None):
  r"""Eager-mode slow path for the `LinSpace` op.

  Coerces `start` and `stop` to a common dtype and `num` to an index
  tensor (defaulting to int32), runs the `LinSpace` kernel through the
  generic executor, records the gradient, and returns the output tensor.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (start, stop) = _execute.args_to_matching_eager(
      [start, stop], eager_ctx)
  attr_tidx, (num,) = _execute.args_to_matching_eager(
      [num], eager_ctx, _dtypes.int32)
  flat_inputs = [start, stop, num]
  op_attrs = ("T", attr_t, "Tidx", attr_tidx)
  outputs = _execute.execute(b"LinSpace", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("LinSpace", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
4226 
4227 
@tf_export('math.log', 'log')
@deprecated_endpoints('log')
def log(x, name=None):
  r"""Computes natural logarithm of x element-wise.

  I.e., \\(y = \log_e x\\).

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a "Log" node and record the gradient against its
    # symbolic inputs and outputs.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Log", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Log", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C-level fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Log", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle fall back to the Python slow path.
      return log_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as a Python exception, tagging the
      # op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4269 
4270 
def log_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for the `Log` op.

  Coerces `x` to an eager tensor of a supported dtype, runs the `Log`
  kernel through the generic executor, records the gradient, and returns
  the single output tensor.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Log", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Log", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
4285 
4286 
@tf_export('math.log1p', 'log1p')
@deprecated_endpoints('log1p')
def log1p(x, name=None):
  r"""Computes natural logarithm of (1 + x) element-wise.

  I.e., \\(y = \log_e (1 + x)\\).

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a "Log1p" node and record the gradient against
    # its symbolic inputs and outputs.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Log1p", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Log1p", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C-level fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Log1p", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle fall back to the Python slow path.
      return log1p_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as a Python exception, tagging the
      # op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4328 
4329 
def log1p_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for the `Log1p` op.

  Coerces `x` to an eager tensor of a supported dtype, runs the `Log1p`
  kernel through the generic executor, records the gradient, and returns
  the single output tensor.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Log1p", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Log1p", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
4344 
4345 
@tf_export('math.logical_and', 'logical_and')
@deprecated_endpoints('logical_and')
def logical_and(x, y, name=None):
  r"""Returns the truth value of x AND y element-wise.

  *NOTE*: `math.logical_and` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor` of type `bool`.
    y: A `Tensor` of type `bool`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a "LogicalAnd" node and record the gradient against
    # its symbolic inputs and outputs. The op has no attrs (bool-only).
    _, _, _op = _op_def_lib._apply_op_helper(
        "LogicalAnd", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
      "LogicalAnd", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C-level fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "LogicalAnd",
        name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle fall back to the Python slow path.
      return logical_and_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as a Python exception, tagging the
      # op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4389 
4390 
def logical_and_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for the `LogicalAnd` op.

  Both operands are coerced to `bool` tensors, the kernel is run through
  the generic executor (the op has no attrs), the gradient is recorded,
  and the single boolean output tensor is returned.
  """
  eager_ctx = ctx or _context.context()
  x = _ops.convert_to_tensor(x, _dtypes.bool)
  y = _ops.convert_to_tensor(y, _dtypes.bool)
  flat_inputs = [x, y]
  outputs = _execute.execute(b"LogicalAnd", 1, inputs=flat_inputs,
                             attrs=None, ctx=eager_ctx, name=name)
  _execute.record_gradient("LogicalAnd", flat_inputs, None, outputs, name)
  result, = outputs
  return result
4406 
4407 
@tf_export('math.logical_not', 'logical_not')
@deprecated_endpoints('logical_not')
def logical_not(x, name=None):
  r"""Returns the truth value of NOT x element-wise.

  Args:
    x: A `Tensor` of type `bool`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a "LogicalNot" node and record the gradient against
    # its symbolic inputs and outputs. The op has no attrs (bool-only).
    _, _, _op = _op_def_lib._apply_op_helper(
        "LogicalNot", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
      "LogicalNot", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C-level fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "LogicalNot",
        name, _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle fall back to the Python slow path.
      return logical_not_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as a Python exception, tagging the
      # op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4447 
4448 
def logical_not_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for the `LogicalNot` op.

  The operand is coerced to a `bool` tensor, the kernel is run through
  the generic executor (the op has no attrs), the gradient is recorded,
  and the single boolean output tensor is returned.
  """
  eager_ctx = ctx or _context.context()
  x = _ops.convert_to_tensor(x, _dtypes.bool)
  flat_inputs = [x]
  outputs = _execute.execute(b"LogicalNot", 1, inputs=flat_inputs,
                             attrs=None, ctx=eager_ctx, name=name)
  _execute.record_gradient("LogicalNot", flat_inputs, None, outputs, name)
  result, = outputs
  return result
4463 
4464 
@tf_export('math.logical_or', 'logical_or')
@deprecated_endpoints('logical_or')
def logical_or(x, y, name=None):
  r"""Returns the truth value of x OR y element-wise.

  *NOTE*: `math.logical_or` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor` of type `bool`.
    y: A `Tensor` of type `bool`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a "LogicalOr" node and record the gradient against
    # its symbolic inputs and outputs. The op has no attrs (bool-only).
    _, _, _op = _op_def_lib._apply_op_helper(
        "LogicalOr", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = None
    _execute.record_gradient(
      "LogicalOr", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C-level fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "LogicalOr",
        name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle fall back to the Python slow path.
      return logical_or_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as a Python exception, tagging the
      # op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4508 
4509 
def logical_or_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for the `LogicalOr` op.

  Both operands are coerced to `bool` tensors, the kernel is run through
  the generic executor (the op has no attrs), the gradient is recorded,
  and the single boolean output tensor is returned.
  """
  eager_ctx = ctx or _context.context()
  x = _ops.convert_to_tensor(x, _dtypes.bool)
  y = _ops.convert_to_tensor(y, _dtypes.bool)
  flat_inputs = [x, y]
  outputs = _execute.execute(b"LogicalOr", 1, inputs=flat_inputs,
                             attrs=None, ctx=eager_ctx, name=name)
  _execute.record_gradient("LogicalOr", flat_inputs, None, outputs, name)
  result, = outputs
  return result
4525 
4526 
def mat_mul(a, b, transpose_a=False, transpose_b=False, name=None):
  r"""Multiply the matrix "a" by the matrix "b".

  The inputs must be two-dimensional matrices and the inner dimension of
  "a" (after being transposed if transpose_a is true) must match the
  outer dimension of "b" (after being transposed if transposed_b is
  true).

  *Note*: The default kernel implementation for MatMul on GPUs uses
  cublas.

  Args:
    a: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
    b: A `Tensor`. Must have the same type as `a`.
    transpose_a: An optional `bool`. Defaults to `False`.
      If true, "a" is transposed before multiplication.
    transpose_b: An optional `bool`. Defaults to `False`.
      If true, "b" is transposed before multiplication.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `a`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize the optional flags to real booleans, then build
    # a "MatMul" node and record its gradient.
    if transpose_a is None:
      transpose_a = False
    transpose_a = _execute.make_bool(transpose_a, "transpose_a")
    if transpose_b is None:
      transpose_b = False
    transpose_b = _execute.make_bool(transpose_b, "transpose_b")
    _, _, _op = _op_def_lib._apply_op_helper(
        "MatMul", a=a, b=b, transpose_a=transpose_a, transpose_b=transpose_b,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("transpose_a", _op.get_attr("transpose_a"), "transpose_b",
              _op.get_attr("transpose_b"), "T", _op.get_attr("T"))
    _execute.record_gradient(
      "MatMul", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C-level fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "MatMul", name,
        _ctx._post_execution_callbacks, a, b, "transpose_a", transpose_a,
        "transpose_b", transpose_b)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle fall back to the Python slow path.
      return mat_mul_eager_fallback(
          a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as a Python exception, tagging the
      # op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4587 
4588 
def mat_mul_eager_fallback(a, b, transpose_a=False, transpose_b=False, name=None, ctx=None):
  r"""Eager-mode slow path for the `MatMul` op.

  Normalizes the transpose flags to booleans, coerces `a` and `b` to a
  common supported dtype, runs the `MatMul` kernel through the generic
  executor, records the gradient, and returns the product tensor.
  """
  eager_ctx = ctx or _context.context()
  transpose_a = _execute.make_bool(
      False if transpose_a is None else transpose_a, "transpose_a")
  transpose_b = _execute.make_bool(
      False if transpose_b is None else transpose_b, "transpose_b")
  attr_t, (a, b) = _execute.args_to_matching_eager([a, b], eager_ctx)
  flat_inputs = [a, b]
  op_attrs = ("transpose_a", transpose_a, "transpose_b", transpose_b, "T",
              attr_t)
  outputs = _execute.execute(b"MatMul", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("MatMul", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
4611 
4612 
def _max(input, axis, keep_dims=False, name=None):
  r"""Computes the maximum of elements across dimensions of a tensor.

  Reduces `input` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `axis`. If `keep_dims` is true, the reduced dimensions are
  retained with length 1.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      The tensor to reduce.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The dimensions to reduce. Must be in the range
      `[-rank(input), rank(input))`.
    keep_dims: An optional `bool`. Defaults to `False`.
      If true, retain reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize `keep_dims`, then build a "Max" node (the op
    # itself calls the axis argument `reduction_indices`) and record its
    # gradient.
    if keep_dims is None:
      keep_dims = False
    keep_dims = _execute.make_bool(keep_dims, "keep_dims")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Max", input=input, reduction_indices=axis, keep_dims=keep_dims,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"),
              "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "Max", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C-level fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Max", name,
        _ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle fall back to the Python slow path.
      return _max_eager_fallback(
          input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as a Python exception, tagging the
      # op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4666 
4667 
def _max_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
  r"""Eager-mode slow path for the `Max` reduction op.

  Normalizes `keep_dims` to a boolean, coerces `input` to a supported
  dtype and `axis` to an index tensor (int32 by default), runs the `Max`
  kernel through the generic executor, records the gradient, and returns
  the reduced tensor.
  """
  eager_ctx = ctx or _context.context()
  keep_dims = _execute.make_bool(
      False if keep_dims is None else keep_dims, "keep_dims")
  attr_t, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  attr_tidx, (axis,) = _execute.args_to_matching_eager(
      [axis], eager_ctx, _dtypes.int32)
  flat_inputs = [input, axis]
  op_attrs = ("keep_dims", keep_dims, "T", attr_t, "Tidx", attr_tidx)
  outputs = _execute.execute(b"Max", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient("Max", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
4686 
4687 
@tf_export('math.maximum', 'maximum')
@deprecated_endpoints('maximum')
def maximum(x, y, name=None):
  r"""Returns the max of x and y (i.e. x > y ? x : y) element-wise.

  *NOTE*: `math.maximum` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build a "Maximum" node and record the gradient against
    # its symbolic inputs and outputs.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Maximum", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Maximum", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C-level fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Maximum",
        name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle fall back to the Python slow path.
      return maximum_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as a Python exception, tagging the
      # op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4731 
4732 
def maximum_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for the `Maximum` op.

  Coerces `x` and `y` to a common supported dtype, runs the `Maximum`
  kernel through the generic executor, records the gradient, and returns
  the single output tensor.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x, y) = _execute.args_to_matching_eager([x, y], eager_ctx)
  flat_inputs = [x, y]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Maximum", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient("Maximum", flat_inputs, op_attrs, outputs, name)
  result, = outputs
  return result
4748 
4749 
def mean(input, axis, keep_dims=False, name=None):
  r"""Computes the mean of elements across dimensions of a tensor.

  Reduces `input` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `axis`. If `keep_dims` is true, the reduced dimensions are
  retained with length 1.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      The tensor to reduce.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The dimensions to reduce. Must be in the range
      `[-rank(input), rank(input))`.
    keep_dims: An optional `bool`. Defaults to `False`.
      If true, retain reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize `keep_dims`, then build a "Mean" node (the op
    # itself calls the axis argument `reduction_indices`) and record its
    # gradient.
    if keep_dims is None:
      keep_dims = False
    keep_dims = _execute.make_bool(keep_dims, "keep_dims")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Mean", input=input, reduction_indices=axis, keep_dims=keep_dims,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"),
              "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "Mean", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the C-level fast path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Mean", name,
        _ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle fall back to the Python slow path.
      return mean_eager_fallback(
          input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the C++ error status as a Python exception, tagging the
      # op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4803 
4804 
def mean_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
  r"""Eager-mode slow path for mean (the "Mean" op)."""
  _ctx = _context.context() if not ctx else ctx
  # None means "use the attr's default value".
  keep_dims = _execute.make_bool(False if keep_dims is None else keep_dims,
                                 "keep_dims")
  # Convert inputs to eager tensors, inferring the T and Tidx attr dtypes.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx,
                                                        _dtypes.int32)
  _flat = [input, axis]
  _attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx)
  _result = _execute.execute(b"Mean", 1, inputs=_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Mean", _flat, _attrs, _result, name)
  _out, = _result
  return _out
4823 
4824 
def _min(input, axis, keep_dims=False, name=None):
  r"""Computes the minimum of elements across dimensions of a tensor.

  Reduces `input` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `axis`. If `keep_dims` is true, the reduced dimensions are
  retained with length 1.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      The tensor to reduce.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The dimensions to reduce. Must be in the range
      `[-rank(input), rank(input))`.
    keep_dims: An optional `bool`. Defaults to `False`.
      If true, retain reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode: build a "Min" node into the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if keep_dims is None:
      keep_dims = False
    keep_dims = _execute.make_bool(keep_dims, "keep_dims")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Min", input=input, reduction_indices=axis, keep_dims=keep_dims,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"),
              "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "Min", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Min", name,
        _ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle retry via the Python slow path.
      return _min_eager_fallback(
          input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagged with the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4878 
4879 
def _min_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
  r"""Eager-mode slow path for _min (the "Min" op)."""
  _ctx = _context.context() if not ctx else ctx
  # None means "use the attr's default value".
  keep_dims = _execute.make_bool(False if keep_dims is None else keep_dims,
                                 "keep_dims")
  # Convert inputs to eager tensors, inferring the T and Tidx attr dtypes.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx,
                                                        _dtypes.int32)
  _flat = [input, axis]
  _attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx)
  _result = _execute.execute(b"Min", 1, inputs=_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Min", _flat, _attrs, _result, name)
  _out, = _result
  return _out
4898 
4899 
@tf_export('math.minimum', 'minimum')
@deprecated_endpoints('minimum')
def minimum(x, y, name=None):
  r"""Returns the min of x and y (i.e. x < y ? x : y) element-wise.

  *NOTE*: `math.minimum` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode: build a "Minimum" node into the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Minimum", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Minimum", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Minimum",
        name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle retry via the Python slow path.
      return minimum_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagged with the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
4943 
4944 
def minimum_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for minimum (the "Minimum" op)."""
  _ctx = _context.context() if not ctx else ctx
  # Promote both operands to eager tensors sharing one dtype T.
  _attr_T, (x, y) = _execute.args_to_matching_eager([x, y], _ctx)
  _flat = [x, y]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Minimum", 1, inputs=_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Minimum", _flat, _attrs, _result, name)
  _out, = _result
  return _out
4960 
4961 
def mod(x, y, name=None):
  r"""Returns element-wise remainder of division. This emulates C semantics in that

  the result here is consistent with a truncating divide. E.g.
  `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`.

  *NOTE*: `Mod` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `half`, `bfloat16`, `float32`, `float64`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode: build a "Mod" node into the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Mod", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Mod", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Mod", name,
        _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle retry via the Python slow path.
      return mod_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagged with the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5006 
5007 
def mod_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for mod (the "Mod" op)."""
  _ctx = _context.context() if not ctx else ctx
  # Promote both operands to eager tensors sharing one dtype T.
  _attr_T, (x, y) = _execute.args_to_matching_eager([x, y], _ctx)
  _flat = [x, y]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Mod", 1, inputs=_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Mod", _flat, _attrs, _result, name)
  _out, = _result
  return _out
5023 
5024 
def mul(x, y, name=None):
  r"""Returns x * y element-wise.

  *NOTE*: `Multiply` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode: build a "Mul" node into the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Mul", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Mul", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Mul", name,
        _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle retry via the Python slow path.
      return mul_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagged with the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5066 
5067 
def mul_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for mul (the "Mul" op)."""
  _ctx = _context.context() if not ctx else ctx
  # Promote both operands to eager tensors sharing one dtype T.
  _attr_T, (x, y) = _execute.args_to_matching_eager([x, y], _ctx)
  _flat = [x, y]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Mul", 1, inputs=_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Mul", _flat, _attrs, _result, name)
  _out, = _result
  return _out
5083 
5084 
def neg(x, name=None):
  r"""Computes numerical negative value element-wise.

  I.e., \\(y = -x\\).

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode: build a "Neg" node into the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Neg", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Neg", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Neg", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle retry via the Python slow path.
      return neg_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagged with the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5124 
5125 
def neg_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for neg (the "Neg" op)."""
  _ctx = _context.context() if not ctx else ctx
  # Promote the operand to an eager tensor, inferring its dtype T.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Neg", 1, inputs=_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Neg", _flat, _attrs, _result, name)
  _out, = _result
  return _out
5140 
5141 
@tf_export('math.not_equal', 'not_equal')
@deprecated_endpoints('not_equal')
def not_equal(x, y, name=None):
  r"""Returns the truth value of (x != y) element-wise.

  *NOTE*: `math.not_equal` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`, `complex64`, `quint8`, `qint8`, `qint32`, `string`, `bool`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context
  # Graph mode: build a "NotEqual" node into the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "NotEqual", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "NotEqual", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "NotEqual",
        name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle retry via the Python slow path.
      return not_equal_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagged with the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5185 
5186 
def not_equal_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for not_equal (the "NotEqual" op)."""
  _ctx = _context.context() if not ctx else ctx
  # Promote both operands to eager tensors sharing one dtype T.
  _attr_T, (x, y) = _execute.args_to_matching_eager([x, y], _ctx)
  _flat = [x, y]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"NotEqual", 1, inputs=_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("NotEqual", _flat, _attrs, _result, name)
  _out, = _result
  return _out
5202 
5203 
@tf_export('math.polygamma', 'polygamma')
@deprecated_endpoints('polygamma')
def polygamma(a, x, name=None):
  r"""Compute the polygamma function \\(\psi^{(n)}(x)\\).

  The polygamma function is defined as:


  \\(\psi^{(n)}(x) = \frac{d^n}{dx^n} \psi(x)\\)

  where \\(\psi(x)\\) is the digamma function.

  Args:
    a: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    x: A `Tensor`. Must have the same type as `a`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `a`.
  """
  _ctx = _context._context
  # Graph mode: build a "Polygamma" node into the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Polygamma", a=a, x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Polygamma", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Polygamma",
        name, _ctx._post_execution_callbacks, a, x)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle retry via the Python slow path.
      return polygamma_eager_fallback(
          a, x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagged with the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5251 
5252 
def polygamma_eager_fallback(a, x, name=None, ctx=None):
  r"""Eager-mode slow path for polygamma (the "Polygamma" op)."""
  _ctx = _context.context() if not ctx else ctx
  # Promote both operands to eager tensors sharing one dtype T.
  _attr_T, (a, x) = _execute.args_to_matching_eager([a, x], _ctx)
  _flat = [a, x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Polygamma", 1, inputs=_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Polygamma", _flat, _attrs, _result, name)
  _out, = _result
  return _out
5268 
5269 
def _pow(x, y, name=None):
  r"""Computes the power of one value to another.

  Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
  corresponding elements in `x` and `y`. For example:

  ```
  # tensor 'x' is [[2, 2]], [3, 3]]
  # tensor 'y' is [[8, 16], [2, 3]]
  tf.pow(x, y) ==> [[256, 65536], [9, 27]]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `half`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode: build a "Pow" node into the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Pow", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Pow", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Pow", name,
        _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle retry via the Python slow path.
      return _pow_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagged with the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5317 
5318 
def _pow_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for _pow (the "Pow" op)."""
  _ctx = _context.context() if not ctx else ctx
  # Promote both operands to eager tensors sharing one dtype T.
  _attr_T, (x, y) = _execute.args_to_matching_eager([x, y], _ctx)
  _flat = [x, y]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Pow", 1, inputs=_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Pow", _flat, _attrs, _result, name)
  _out, = _result
  return _out
5334 
5335 
def prod(input, axis, keep_dims=False, name=None):
  r"""Computes the product of elements across dimensions of a tensor.

  Reduces `input` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `axis`. If `keep_dims` is true, the reduced dimensions are
  retained with length 1.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      The tensor to reduce.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The dimensions to reduce. Must be in the range
      `[-rank(input), rank(input))`.
    keep_dims: An optional `bool`. Defaults to `False`.
      If true, retain reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  # Graph mode: build a "Prod" node into the current graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if keep_dims is None:
      keep_dims = False
    keep_dims = _execute.make_bool(keep_dims, "keep_dims")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Prod", input=input, reduction_indices=axis, keep_dims=keep_dims,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"),
              "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "Prod", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Prod", name,
        _ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle retry via the Python slow path.
      return prod_eager_fallback(
          input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagged with the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5389 
5390 
def prod_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
  r"""Eager-mode slow path for prod (the "Prod" op)."""
  _ctx = _context.context() if not ctx else ctx
  # None means "use the attr's default value".
  keep_dims = _execute.make_bool(False if keep_dims is None else keep_dims,
                                 "keep_dims")
  # Convert inputs to eager tensors, inferring the T and Tidx attr dtypes.
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], _ctx,
                                                        _dtypes.int32)
  _flat = [input, axis]
  _attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx)
  _result = _execute.execute(b"Prod", 1, inputs=_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Prod", _flat, _attrs, _result, name)
  _out, = _result
  return _out
5409 
5410 
# Named result type for the three outputs of the QuantizeDownAndShrinkRange op.
_quantize_down_and_shrink_range_outputs = ["output", "output_min",
                                          "output_max"]
_QuantizeDownAndShrinkRangeOutput = _collections.namedtuple(
    "QuantizeDownAndShrinkRange", _quantize_down_and_shrink_range_outputs)
5415 
5416 
def quantize_down_and_shrink_range(input, input_min, input_max, out_type, name=None):
  r"""Convert the quantized 'input' tensor into a lower-precision 'output', using the

  actual distribution of the values to maximize the usage of the lower bit depth
  and adjusting the output min and max ranges accordingly.

  [input_min, input_max] are scalar floats that specify the range for the float
  interpretation of the 'input' data. For example, if input_min is -1.0f and
  input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
  value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.

  This operator tries to squeeze as much precision as possible into an output with
  a lower bit depth by calculating the actual min and max values found in the
  data. For example, maybe that quint16 input has no values lower than 16,384 and
  none higher than 49,152. That means only half the range is actually needed, all
  the float interpretations are between -0.5f and 0.5f, so if we want to compress
  the data into a quint8 output, we can use that range rather than the theoretical
  -1.0f to 1.0f that is suggested by the input min and max.

  In practice, this is most useful for taking output from operations like
  QuantizedMatMul that can produce higher bit-depth outputs than their inputs and
  may have large potential output ranges, but in practice have a distribution of
  input values that only uses a small fraction of the possible range. By feeding
  that output into this operator, we can reduce it from 32 bits down to 8 with
  minimal loss of accuracy.

  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    input_min: A `Tensor` of type `float32`.
      The float value that the minimum quantized input value represents.
    input_max: A `Tensor` of type `float32`.
      The float value that the maximum quantized input value represents.
    out_type: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`.
      The type of the output. Should be a lower bit depth than Tinput.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, output_min, output_max).

    output: A `Tensor` of type `out_type`.
    output_min: A `Tensor` of type `float32`.
    output_max: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode: build a "QuantizeDownAndShrinkRange" node into the graph.
  if _ctx is None or not _ctx._eager_context.is_eager:
    out_type = _execute.make_type(out_type, "out_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizeDownAndShrinkRange", input=input, input_min=input_min,
        input_max=input_max, out_type=out_type, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("Tinput", _op.get_attr("Tinput"), "out_type",
              _op.get_attr("out_type"))
    _execute.record_gradient(
      "QuantizeDownAndShrinkRange", _inputs_flat, _attrs, _result, name)
    # Multi-output op: wrap the outputs in the named tuple.
    _result = _QuantizeDownAndShrinkRangeOutput._make(_result)
    return _result

  else:
    try:
      # Eager fast path: dispatch directly to the C++ runtime.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizeDownAndShrinkRange", name, _ctx._post_execution_callbacks,
        input, input_min, input_max, "out_type", out_type)
      _result = _QuantizeDownAndShrinkRangeOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Inputs the fast path cannot handle retry via the Python slow path.
      return quantize_down_and_shrink_range_eager_fallback(
          input, input_min, input_max, out_type=out_type, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagged with the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5492 
5493 
def quantize_down_and_shrink_range_eager_fallback(input, input_min, input_max, out_type, name=None, ctx=None):
  r"""Eager-mode slow path for quantize_down_and_shrink_range."""
  _ctx = _context.context() if not ctx else ctx
  out_type = _execute.make_type(out_type, "out_type")
  # Promote the quantized input, inferring its Tinput dtype.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  # The range bounds are always float32 tensors.
  input_min = _ops.convert_to_tensor(input_min, _dtypes.float32)
  input_max = _ops.convert_to_tensor(input_max, _dtypes.float32)
  _flat = [input, input_min, input_max]
  _attrs = ("Tinput", _attr_Tinput, "out_type", out_type)
  _result = _execute.execute(b"QuantizeDownAndShrinkRange", 3, inputs=_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "QuantizeDownAndShrinkRange", _flat, _attrs, _result, name)
  # Three outputs: wrap them in the named tuple.
  return _QuantizeDownAndShrinkRangeOutput._make(_result)
5512 
5513 
# Named result type for the three outputs of the QuantizedAdd op.
_quantized_add_outputs = ["z", "min_z", "max_z"]
_QuantizedAddOutput = _collections.namedtuple(
    "QuantizedAdd", _quantized_add_outputs)
5517 
5518 
def quantized_add(x, y, min_x, max_x, min_y, max_y, Toutput=_dtypes.qint32, name=None):
  r"""Returns x + y element-wise, working on quantized buffers.

  Args:
    x: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    y: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    min_x: A `Tensor` of type `float32`.
      The float value that the lowest quantized `x` value represents.
    max_x: A `Tensor` of type `float32`.
      The float value that the highest quantized `x` value represents.
    min_y: A `Tensor` of type `float32`.
      The float value that the lowest quantized `y` value represents.
    max_y: A `Tensor` of type `float32`.
      The float value that the highest quantized `y` value represents.
    Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (z, min_z, max_z).

    z: A `Tensor` of type `Toutput`.
    min_z: A `Tensor` of type `float32`.
    max_z: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (no live eager context): build a graph op via the op-def
  # library and record the gradient against the constructed op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if Toutput is None:
      Toutput = _dtypes.qint32
    Toutput = _execute.make_type(Toutput, "Toutput")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedAdd", x=x, y=y, min_x=min_x, max_x=max_x, min_y=min_y,
        max_y=max_y, Toutput=Toutput, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T1", _op.get_attr("T1"), "T2", _op.get_attr("T2"), "Toutput",
              _op.get_attr("Toutput"))
    _execute.record_gradient(
      "QuantizedAdd", _inputs_flat, _attrs, _result, name)
    _result = _QuantizedAddOutput._make(_result)
    return _result

  else:
    # Eager mode: try the C fast path first; argument order here must match
    # the generated op signature exactly.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "QuantizedAdd",
        name, _ctx._post_execution_callbacks, x, y, min_x, max_x, min_y,
        max_y, "Toutput", Toutput)
      _result = _QuantizedAddOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return quantized_add_eager_fallback(
          x, y, min_x, max_x, min_y, max_y, Toutput=Toutput, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the op name in the error message, then re-raise as the
      # corresponding TF error type without chaining the C-level exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5578 
5579 
def quantized_add_eager_fallback(x, y, min_x, max_x, min_y, max_y, Toutput=_dtypes.qint32, name=None, ctx=None):
  r"""Eager-mode slow path for quantized_add.

  Resolves attrs and tensor conversions in Python, then dispatches the
  QuantizedAdd op through the generic execute path.
  """
  eager_ctx = ctx or _context.context()
  if Toutput is None:
    Toutput = _dtypes.qint32
  Toutput = _execute.make_type(Toutput, "Toutput")
  # x and y may carry different quantized dtypes, so each gets its own attr.
  _attr_T1, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  _attr_T2, (y,) = _execute.args_to_matching_eager([y], eager_ctx)
  # All range endpoints are float32 scalars.
  min_x = _ops.convert_to_tensor(min_x, _dtypes.float32)
  max_x = _ops.convert_to_tensor(max_x, _dtypes.float32)
  min_y = _ops.convert_to_tensor(min_y, _dtypes.float32)
  max_y = _ops.convert_to_tensor(max_y, _dtypes.float32)
  flat_inputs = [x, y, min_x, max_x, min_y, max_y]
  op_attrs = ("T1", _attr_T1, "T2", _attr_T2, "Toutput", Toutput)
  outputs = _execute.execute(b"QuantizedAdd", 3, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "QuantizedAdd", flat_inputs, op_attrs, outputs, name)
  return _QuantizedAddOutput._make(outputs)
5602 
5603 
# Structured result for QuantizedMatMul: the quantized product plus the
# float range [min_out, max_out] that its quantized values represent.
_quantized_mat_mul_outputs = ["out", "min_out", "max_out"]
_QuantizedMatMulOutput = _collections.namedtuple(
    "QuantizedMatMul", _quantized_mat_mul_outputs)
5607 
5608 
def quantized_mat_mul(a, b, min_a, max_a, min_b, max_b, Toutput=_dtypes.qint32, transpose_a=False, transpose_b=False, Tactivation=_dtypes.quint8, name=None):
  r"""Perform a quantized matrix multiplication of  `a` by the matrix `b`.

  The inputs must be two-dimensional matrices and the inner dimension of
  `a` (after being transposed if `transpose_a` is non-zero) must match the
  outer dimension of `b` (after being transposed if `transposed_b` is
  non-zero).

  Args:
    a: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      Must be a two-dimensional tensor.
    b: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
      Must be a two-dimensional tensor.
    min_a: A `Tensor` of type `float32`.
      The float value that the lowest quantized `a` value represents.
    max_a: A `Tensor` of type `float32`.
      The float value that the highest quantized `a` value represents.
    min_b: A `Tensor` of type `float32`.
      The float value that the lowest quantized `b` value represents.
    max_b: A `Tensor` of type `float32`.
      The float value that the highest quantized `b` value represents.
    Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
    transpose_a: An optional `bool`. Defaults to `False`.
      If true, `a` is transposed before multiplication.
    transpose_b: An optional `bool`. Defaults to `False`.
      If true, `b` is transposed before multiplication.
    Tactivation: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.quint8`.
      The type of output produced by activation function
      following this operation.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (out, min_out, max_out).

    out: A `Tensor` of type `Toutput`.
    min_out: A `Tensor` of type `float32`.
    max_out: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (no live eager context): normalize the optional attrs, then
  # build a graph op via the op-def library.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if Toutput is None:
      Toutput = _dtypes.qint32
    Toutput = _execute.make_type(Toutput, "Toutput")
    if transpose_a is None:
      transpose_a = False
    transpose_a = _execute.make_bool(transpose_a, "transpose_a")
    if transpose_b is None:
      transpose_b = False
    transpose_b = _execute.make_bool(transpose_b, "transpose_b")
    if Tactivation is None:
      Tactivation = _dtypes.quint8
    Tactivation = _execute.make_type(Tactivation, "Tactivation")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedMatMul", a=a, b=b, min_a=min_a, max_a=max_a, min_b=min_b,
        max_b=max_b, Toutput=Toutput, transpose_a=transpose_a,
        transpose_b=transpose_b, Tactivation=Tactivation, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T1", _op.get_attr("T1"), "T2", _op.get_attr("T2"), "Toutput",
              _op.get_attr("Toutput"), "transpose_a",
              _op.get_attr("transpose_a"), "transpose_b",
              _op.get_attr("transpose_b"), "Tactivation",
              _op.get_attr("Tactivation"))
    _execute.record_gradient(
      "QuantizedMatMul", _inputs_flat, _attrs, _result, name)
    _result = _QuantizedMatMulOutput._make(_result)
    return _result

  else:
    # Eager mode: try the C fast path first; argument order here must match
    # the generated op signature exactly.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "QuantizedMatMul", name, _ctx._post_execution_callbacks, a, b, min_a,
        max_a, min_b, max_b, "Toutput", Toutput, "transpose_a", transpose_a,
        "transpose_b", transpose_b, "Tactivation", Tactivation)
      _result = _QuantizedMatMulOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return quantized_mat_mul_eager_fallback(
          a, b, min_a, max_a, min_b, max_b, Toutput=Toutput,
          transpose_a=transpose_a, transpose_b=transpose_b,
          Tactivation=Tactivation, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the op name in the error message, then re-raise as the
      # corresponding TF error type without chaining the C-level exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5697 
5698 
def quantized_mat_mul_eager_fallback(a, b, min_a, max_a, min_b, max_b, Toutput=_dtypes.qint32, transpose_a=False, transpose_b=False, Tactivation=_dtypes.quint8, name=None, ctx=None):
  r"""Eager-mode slow path for quantized_mat_mul.

  Resolves attrs and tensor conversions in Python, then dispatches the
  QuantizedMatMul op through the generic execute path.
  """
  eager_ctx = ctx or _context.context()
  # Normalize all optional attrs to concrete attr values.
  if Toutput is None:
    Toutput = _dtypes.qint32
  Toutput = _execute.make_type(Toutput, "Toutput")
  if transpose_a is None:
    transpose_a = False
  transpose_a = _execute.make_bool(transpose_a, "transpose_a")
  if transpose_b is None:
    transpose_b = False
  transpose_b = _execute.make_bool(transpose_b, "transpose_b")
  if Tactivation is None:
    Tactivation = _dtypes.quint8
  Tactivation = _execute.make_type(Tactivation, "Tactivation")
  # a and b may carry different quantized dtypes, so each gets its own attr.
  _attr_T1, (a,) = _execute.args_to_matching_eager([a], eager_ctx)
  _attr_T2, (b,) = _execute.args_to_matching_eager([b], eager_ctx)
  # All range endpoints are float32 scalars.
  min_a = _ops.convert_to_tensor(min_a, _dtypes.float32)
  max_a = _ops.convert_to_tensor(max_a, _dtypes.float32)
  min_b = _ops.convert_to_tensor(min_b, _dtypes.float32)
  max_b = _ops.convert_to_tensor(max_b, _dtypes.float32)
  flat_inputs = [a, b, min_a, max_a, min_b, max_b]
  op_attrs = ("T1", _attr_T1, "T2", _attr_T2, "Toutput", Toutput,
              "transpose_a", transpose_a, "transpose_b", transpose_b,
              "Tactivation", Tactivation)
  outputs = _execute.execute(b"QuantizedMatMul", 3, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "QuantizedMatMul", flat_inputs, op_attrs, outputs, name)
  return _QuantizedMatMulOutput._make(outputs)
5731 
5732 
# Structured result for QuantizedMul: the quantized product plus the float
# range [min_z, max_z] that its quantized values represent.
_quantized_mul_outputs = ["z", "min_z", "max_z"]
_QuantizedMulOutput = _collections.namedtuple(
    "QuantizedMul", _quantized_mul_outputs)
5736 
5737 
def quantized_mul(x, y, min_x, max_x, min_y, max_y, Toutput=_dtypes.qint32, name=None):
  r"""Returns x * y element-wise, working on quantized buffers.

  Args:
    x: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    y: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    min_x: A `Tensor` of type `float32`.
      The float value that the lowest quantized `x` value represents.
    max_x: A `Tensor` of type `float32`.
      The float value that the highest quantized `x` value represents.
    min_y: A `Tensor` of type `float32`.
      The float value that the lowest quantized `y` value represents.
    max_y: A `Tensor` of type `float32`.
      The float value that the highest quantized `y` value represents.
    Toutput: An optional `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`. Defaults to `tf.qint32`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (z, min_z, max_z).

    z: A `Tensor` of type `Toutput`.
    min_z: A `Tensor` of type `float32`.
    max_z: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (no live eager context): build a graph op via the op-def
  # library and record the gradient against the constructed op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if Toutput is None:
      Toutput = _dtypes.qint32
    Toutput = _execute.make_type(Toutput, "Toutput")
    _, _, _op = _op_def_lib._apply_op_helper(
        "QuantizedMul", x=x, y=y, min_x=min_x, max_x=max_x, min_y=min_y,
        max_y=max_y, Toutput=Toutput, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T1", _op.get_attr("T1"), "T2", _op.get_attr("T2"), "Toutput",
              _op.get_attr("Toutput"))
    _execute.record_gradient(
      "QuantizedMul", _inputs_flat, _attrs, _result, name)
    _result = _QuantizedMulOutput._make(_result)
    return _result

  else:
    # Eager mode: try the C fast path first; argument order here must match
    # the generated op signature exactly.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "QuantizedMul",
        name, _ctx._post_execution_callbacks, x, y, min_x, max_x, min_y,
        max_y, "Toutput", Toutput)
      _result = _QuantizedMulOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return quantized_mul_eager_fallback(
          x, y, min_x, max_x, min_y, max_y, Toutput=Toutput, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the op name in the error message, then re-raise as the
      # corresponding TF error type without chaining the C-level exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5797 
5798 
def quantized_mul_eager_fallback(x, y, min_x, max_x, min_y, max_y, Toutput=_dtypes.qint32, name=None, ctx=None):
  r"""Eager-mode slow path for quantized_mul.

  Resolves attrs and tensor conversions in Python, then dispatches the
  QuantizedMul op through the generic execute path.
  """
  eager_ctx = ctx or _context.context()
  if Toutput is None:
    Toutput = _dtypes.qint32
  Toutput = _execute.make_type(Toutput, "Toutput")
  # x and y may carry different quantized dtypes, so each gets its own attr.
  _attr_T1, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  _attr_T2, (y,) = _execute.args_to_matching_eager([y], eager_ctx)
  # All range endpoints are float32 scalars.
  min_x = _ops.convert_to_tensor(min_x, _dtypes.float32)
  max_x = _ops.convert_to_tensor(max_x, _dtypes.float32)
  min_y = _ops.convert_to_tensor(min_y, _dtypes.float32)
  max_y = _ops.convert_to_tensor(max_y, _dtypes.float32)
  flat_inputs = [x, y, min_x, max_x, min_y, max_y]
  op_attrs = ("T1", _attr_T1, "T2", _attr_T2, "Toutput", Toutput)
  outputs = _execute.execute(b"QuantizedMul", 3, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "QuantizedMul", flat_inputs, op_attrs, outputs, name)
  return _QuantizedMulOutput._make(outputs)
5821 
5822 
def _range(start, limit, delta, name=None):
  r"""Creates a sequence of numbers.

  This operation creates a sequence of numbers that begins at `start` and
  extends by increments of `delta` up to but not including `limit`.

  For example:

  ```
  # 'start' is 3
  # 'limit' is 18
  # 'delta' is 3
  tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
  ```

  Args:
    start: A `Tensor`. Must be one of the following types: `bfloat16`, `float32`, `float64`, `int32`, `int64`.
      0-D (scalar). First entry in the sequence.
    limit: A `Tensor`. Must have the same type as `start`.
      0-D (scalar). Upper limit of sequence, exclusive.
    delta: A `Tensor`. Must have the same type as `start`.
      0-D (scalar). Optional. Default is 1. Number that increments `start`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `start`.
  """
  _ctx = _context._context
  # Graph mode (no live eager context): build a graph op via the op-def
  # library and record the gradient against the constructed op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Range", start=start, limit=limit, delta=delta, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "Range", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; argument order here must match
    # the generated op signature exactly.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Range", name,
        _ctx._post_execution_callbacks, start, limit, delta)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return _range_eager_fallback(
          start, limit, delta, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the op name in the error message, then re-raise as the
      # corresponding TF error type without chaining the C-level exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5877 
5878 
def _range_eager_fallback(start, limit, delta, name=None, ctx=None):
  r"""Eager-mode slow path for _range.

  All three scalar arguments must share one dtype (the Tidx attr),
  defaulting to int32 when none of them carries a dtype.
  """
  eager_ctx = ctx or _context.context()
  _attr_Tidx, (start, limit, delta) = _execute.args_to_matching_eager(
      [start, limit, delta], eager_ctx, _dtypes.int32)
  flat_inputs = [start, limit, delta]
  op_attrs = ("Tidx", _attr_Tidx)
  outputs = _execute.execute(b"Range", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Range", flat_inputs, op_attrs, outputs, name)
  # Single-output op: unwrap the one-element result list.
  result, = outputs
  return result
5894 
5895 
def real(input, Tout=_dtypes.float32, name=None):
  r"""Returns the real part of a complex number.

  Given a tensor `input` of complex numbers, this operation returns a tensor of
  type `float` that is the real part of each element in `input`. All elements in
  `input` must be complex numbers of the form \\(a + bj\\), where *a* is the real
   part returned by this operation and *b* is the imaginary part.

  For example:

  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.real(input) ==> [-2.25, 3.25]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
    Tout: An optional `tf.DType` from: `tf.float32, tf.float64`. Defaults to `tf.float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `Tout`.
  """
  _ctx = _context._context
  # Graph mode (no live eager context): build a graph op via the op-def
  # library and record the gradient against the constructed op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    if Tout is None:
      Tout = _dtypes.float32
    Tout = _execute.make_type(Tout, "Tout")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Real", input=input, Tout=Tout, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tout", _op.get_attr("Tout"))
    _execute.record_gradient(
      "Real", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; argument order here must match
    # the generated op signature exactly.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Real", name,
        _ctx._post_execution_callbacks, input, "Tout", Tout)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return real_eager_fallback(
          input, Tout=Tout, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the op name in the error message, then re-raise as the
      # corresponding TF error type without chaining the C-level exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
5949 
5950 
def real_eager_fallback(input, Tout=_dtypes.float32, name=None, ctx=None):
  r"""Eager-mode slow path for real.

  Resolves attrs and tensor conversions in Python, then dispatches the
  Real op through the generic execute path.
  """
  eager_ctx = ctx or _context.context()
  if Tout is None:
    Tout = _dtypes.float32
  Tout = _execute.make_type(Tout, "Tout")
  # Default the input dtype to complex64 when it cannot be inferred.
  _attr_T, (input,) = _execute.args_to_matching_eager(
      [input], eager_ctx, _dtypes.complex64)
  flat_inputs = [input]
  op_attrs = ("T", _attr_T, "Tout", Tout)
  outputs = _execute.execute(b"Real", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Real", flat_inputs, op_attrs, outputs, name)
  # Single-output op: unwrap the one-element result list.
  result, = outputs
  return result
5968 
5969 
def real_div(x, y, name=None):
  r"""Returns x / y element-wise for real types.

  If `x` and `y` are reals, this will return the floating-point division.

  *NOTE*: `Div` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode (no live eager context): build a graph op via the op-def
  # library and record the gradient against the constructed op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "RealDiv", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "RealDiv", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; argument order here must match
    # the generated op signature exactly.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "RealDiv",
        name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return real_div_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the op name in the error message, then re-raise as the
      # corresponding TF error type without chaining the C-level exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6013 
6014 
def real_div_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for real_div.

  Both operands must resolve to one shared dtype (the T attr).
  """
  eager_ctx = ctx or _context.context()
  _attr_T, (x, y) = _execute.args_to_matching_eager([x, y], eager_ctx)
  flat_inputs = [x, y]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"RealDiv", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "RealDiv", flat_inputs, op_attrs, outputs, name)
  # Single-output op: unwrap the one-element result list.
  result, = outputs
  return result
6030 
6031 
@tf_export('math.reciprocal', 'reciprocal')
@deprecated_endpoints('reciprocal')
def reciprocal(x, name=None):
  r"""Computes the reciprocal of x element-wise.

  I.e., \\(y = 1 / x\\).

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode (no live eager context): build a graph op via the op-def
  # library and record the gradient against the constructed op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Reciprocal", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Reciprocal", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; argument order here must match
    # the generated op signature exactly.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Reciprocal",
        name, _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return reciprocal_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the op name in the error message, then re-raise as the
      # corresponding TF error type without chaining the C-level exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6073 
6074 
def reciprocal_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for reciprocal.

  Infers the T attr from x and dispatches the Reciprocal op through the
  generic execute path.
  """
  eager_ctx = ctx or _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"Reciprocal", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Reciprocal", flat_inputs, op_attrs, outputs, name)
  # Single-output op: unwrap the one-element result list.
  result, = outputs
  return result
6089 
6090 
def reciprocal_grad(y, dy, name=None):
  r"""Computes the gradient for the inverse of `x` wrt its input.

  Specifically, `grad = -dy * y*y`, where `y = 1/x`, and `dy`
  is the corresponding input gradient.

  Args:
    y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    dy: A `Tensor`. Must have the same type as `y`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `y`.
  """
  _ctx = _context._context
  # Graph mode (no live eager context): build a graph op via the op-def
  # library and record the gradient against the constructed op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "ReciprocalGrad", y=y, dy=dy, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "ReciprocalGrad", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one-element result list.
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path first; argument order here must match
    # the generated op signature exactly.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "ReciprocalGrad", name, _ctx._post_execution_callbacks, y, dy)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return reciprocal_grad_eager_fallback(
          y, dy, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the op name in the error message, then re-raise as the
      # corresponding TF error type without chaining the C-level exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6132 
6133 
def reciprocal_grad_eager_fallback(y, dy, name=None, ctx=None):
  r"""Eager-mode slow path for reciprocal_grad.

  Both operands must resolve to one shared dtype (the T attr).
  """
  eager_ctx = ctx or _context.context()
  _attr_T, (y, dy) = _execute.args_to_matching_eager([y, dy], eager_ctx)
  flat_inputs = [y, dy]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"ReciprocalGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "ReciprocalGrad", flat_inputs, op_attrs, outputs, name)
  # Single-output op: unwrap the one-element result list.
  result, = outputs
  return result
6149 
6150 
# Structured result for RequantizationRange: the float range
# [output_min, output_max] actually covered by the input tensor's values.
_requantization_range_outputs = ["output_min", "output_max"]
_RequantizationRangeOutput = _collections.namedtuple(
    "RequantizationRange", _requantization_range_outputs)
6154 
6155 
def requantization_range(input, input_min, input_max, name=None):
  r"""Given a quantized tensor described by (input, input_min, input_max), outputs a

  range that covers the actual values present in that tensor.  This op is
  typically used to produce the requested_output_min and requested_output_max for
  Requantize.

  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    input_min: A `Tensor` of type `float32`.
      The float value that the minimum quantized input value represents.
    input_max: A `Tensor` of type `float32`.
      The float value that the maximum quantized input value represents.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_min, output_max).

    output_min: A `Tensor` of type `float32`.
    output_max: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (no live eager context): build a graph op via the op-def
  # library and record the gradient against the constructed op.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "RequantizationRange", input=input, input_min=input_min,
        input_max=input_max, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("Tinput", _op.get_attr("Tinput"))
    _execute.record_gradient(
      "RequantizationRange", _inputs_flat, _attrs, _result, name)
    _result = _RequantizationRangeOutput._make(_result)
    return _result

  else:
    # Eager mode: try the C fast path first; argument order here must match
    # the generated op signature exactly.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "RequantizationRange", name, _ctx._post_execution_callbacks, input,
        input_min, input_max)
      _result = _RequantizationRangeOutput._make(_result)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the Python slow path.
      return requantization_range_eager_fallback(
          input, input_min, input_max, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Surface the op name in the error message, then re-raise as the
      # corresponding TF error type without chaining the C-level exception.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6207 
6208 
def requantization_range_eager_fallback(input, input_min, input_max, name=None, ctx=None):
  r"""Eager-mode slow path for requantization_range.

  Resolves attrs and tensor conversions in Python, then dispatches the
  RequantizationRange op through the generic execute path.
  """
  eager_ctx = ctx or _context.context()
  # Infer the Tinput attr from the (possibly non-tensor) input argument.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], eager_ctx)
  # The range endpoints are always float32 scalars.
  input_min = _ops.convert_to_tensor(input_min, _dtypes.float32)
  input_max = _ops.convert_to_tensor(input_max, _dtypes.float32)
  flat_inputs = [input, input_min, input_max]
  op_attrs = ("Tinput", _attr_Tinput)
  outputs = _execute.execute(b"RequantizationRange", 2, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "RequantizationRange", flat_inputs, op_attrs, outputs, name)
  return _RequantizationRangeOutput._make(outputs)
6225 
6226 
# Structured result for Requantize: the re-quantized tensor plus the float
# range [output_min, output_max] that its quantized values represent.
_requantize_outputs = ["output", "output_min", "output_max"]
_RequantizeOutput = _collections.namedtuple(
    "Requantize", _requantize_outputs)
6230 
6231 
def requantize(input, input_min, input_max, requested_output_min, requested_output_max, out_type, name=None):
  r"""Convert the quantized 'input' tensor into a lower-precision 'output', using the

  output range specified with 'requested_output_min' and 'requested_output_max'.

  [input_min, input_max] are scalar floats that specify the range for the float
  interpretation of the 'input' data. For example, if input_min is -1.0f and
  input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
  value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.

  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    input_min: A `Tensor` of type `float32`.
      The float value that the minimum quantized input value represents.
    input_max: A `Tensor` of type `float32`.
      The float value that the maximum quantized input value represents.
    requested_output_min: A `Tensor` of type `float32`.
      The float value that the minimum quantized output value represents.
    requested_output_max: A `Tensor` of type `float32`.
      The float value that the maximum quantized output value represents.
    out_type: A `tf.DType` from: `tf.qint8, tf.quint8, tf.qint32, tf.qint16, tf.quint16`.
      The type of the output. Should be a lower bit depth than Tinput.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output, output_min, output_max).

    output: A `Tensor` of type `out_type`.
    output_min: A `Tensor` of type `float32`.
    output_max: A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (no eager context yet, or eager disabled): build a "Requantize"
  # node in the current graph and record it for gradient bookkeeping.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Normalize out_type to a DataType enum value for the attr.
    out_type = _execute.make_type(out_type, "out_type")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Requantize", input=input, input_min=input_min, input_max=input_max,
        requested_output_min=requested_output_min,
        requested_output_max=requested_output_max, out_type=out_type,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("Tinput", _op.get_attr("Tinput"), "out_type",
              _op.get_attr("out_type"))
    _execute.record_gradient(
      "Requantize", _inputs_flat, _attrs, _result, name)
    # Expose the three outputs as a namedtuple.
    _result = _RequantizeOutput._make(_result)
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Requantize",
        name, _ctx._post_execution_callbacks, input, input_min, input_max,
        requested_output_min, requested_output_max, "out_type", out_type)
      _result = _RequantizeOutput._make(_result)
      return _result
    except _core._FallbackException:
      # The fast path could not handle these inputs (e.g. non-EagerTensor
      # arguments); retry via the generic slow path.
      return requantize_eager_fallback(
          input, input_min, input_max, requested_output_min,
          requested_output_max, out_type=out_type, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C-level error status into the matching Python
      # exception, appending the op name for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6298 
6299 
def requantize_eager_fallback(input, input_min, input_max, requested_output_min, requested_output_max, out_type, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function requantize
  """
  _ctx = ctx if ctx else _context.context()
  out_type = _execute.make_type(out_type, "out_type")
  # Infer "Tinput" from the quantized input; all range bounds are float32.
  _attr_Tinput, (input,) = _execute.args_to_matching_eager([input], _ctx)
  input_min = _ops.convert_to_tensor(input_min, _dtypes.float32)
  input_max = _ops.convert_to_tensor(input_max, _dtypes.float32)
  requested_output_min = _ops.convert_to_tensor(requested_output_min, _dtypes.float32)
  requested_output_max = _ops.convert_to_tensor(requested_output_max, _dtypes.float32)
  _inputs_flat = [input, input_min, input_max, requested_output_min, requested_output_max]
  _attrs = ("Tinput", _attr_Tinput, "out_type", out_type)
  # Requantize produces 3 outputs (output, output_min, output_max).
  _result = _execute.execute(b"Requantize", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "Requantize", _inputs_flat, _attrs, _result, name)
  _result = _RequantizeOutput._make(_result)
  return _result
6319 
6320 
@tf_export('math.rint', 'rint')
@deprecated_endpoints('rint')
def rint(x, name=None):
  r"""Returns element-wise integer closest to x.

  If the result is midway between two representable values,
  the even representable is chosen.
  For example:

  ```
  rint(-1.5) ==> -2.0
  rint(0.5000001) ==> 1.0
  rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode (no eager context yet, or eager disabled): build a "Rint" node
  # in the current graph and record it for gradient bookkeeping.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Rint", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Rint", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one tensor from the output list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Rint", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; retry via the slow path.
      return rint_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C-level error status into a Python exception,
      # appending the op name for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6370 
6371 
def rint_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function rint
  """
  _ctx = ctx if ctx else _context.context()
  # Infer the "T" attr from the (possibly non-tensor) input.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Rint", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Rint", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one tensor.
  _result, = _result
  return _result
6386 
6387 
# NOTE(review): this generated wrapper shadows the builtin `round` at module
# scope, and unlike sibling wrappers it carries no @tf_export decorator —
# confirm against the op registry / generator output before relying on either.
def round(x, name=None):
  r"""Rounds the values of a tensor to the nearest integer, element-wise.

  Rounds half to even.  Also known as bankers rounding. If you want to round
  according to the current system rounding mode use std::rint.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode (no eager context yet, or eager disabled): build a "Round"
  # node in the current graph and record it for gradient bookkeeping.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Round", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Round", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one tensor from the output list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Round", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; retry via the slow path.
      return round_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C-level error status into a Python exception,
      # appending the op name for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6428 
6429 
def round_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function round
  """
  _ctx = ctx if ctx else _context.context()
  # Infer the "T" attr from the (possibly non-tensor) input.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Round", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Round", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one tensor.
  _result, = _result
  return _result
6444 
6445 
@tf_export('math.rsqrt', 'rsqrt')
@deprecated_endpoints('rsqrt')
def rsqrt(x, name=None):
  r"""Computes reciprocal of square root of x element-wise.

  I.e., \\(y = 1 / \sqrt{x}\\).

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode (no eager context yet, or eager disabled): build an "Rsqrt"
  # node in the current graph and record it for gradient bookkeeping.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Rsqrt", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Rsqrt", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one tensor from the output list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Rsqrt", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; retry via the slow path.
      return rsqrt_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C-level error status into a Python exception,
      # appending the op name for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6487 
6488 
def rsqrt_eager_fallback(x, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function rsqrt
  """
  _ctx = ctx if ctx else _context.context()
  # Infer the "T" attr from the (possibly non-tensor) input.
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  _inputs_flat = [x]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Rsqrt", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient(
      "Rsqrt", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one tensor.
  _result, = _result
  return _result
6503 
6504 
def rsqrt_grad(y, dy, name=None):
  r"""Computes the gradient for the rsqrt of `x` wrt its input.

  Specifically, `grad = dy * -0.5 * y^3`, where `y = rsqrt(x)`, and `dy`
  is the corresponding input gradient.

  Args:
    y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    dy: A `Tensor`. Must have the same type as `y`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `y`.
  """
  _ctx = _context._context
  # Graph mode (no eager context yet, or eager disabled): build an
  # "RsqrtGrad" node in the current graph and record it for gradients.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "RsqrtGrad", y=y, dy=dy, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "RsqrtGrad", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one tensor from the output list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "RsqrtGrad",
        name, _ctx._post_execution_callbacks, y, dy)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; retry via the slow path.
      return rsqrt_grad_eager_fallback(
          y, dy, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C-level error status into a Python exception,
      # appending the op name for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6546 
6547 
def rsqrt_grad_eager_fallback(y, dy, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function rsqrt_grad
  """
  _ctx = ctx if ctx else _context.context()
  # Convert y and dy together so they share a single matching dtype "T".
  _attr_T, _inputs_T = _execute.args_to_matching_eager([y, dy], _ctx)
  (y, dy) = _inputs_T
  _inputs_flat = [y, dy]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"RsqrtGrad", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "RsqrtGrad", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one tensor.
  _result, = _result
  return _result
6563 
6564 
@tf_export('math.segment_max', 'segment_max')
@deprecated_endpoints('segment_max')
def segment_max(data, segment_ids, name=None):
  r"""Computes the maximum along segments of a tensor.

  Read
  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
  for an explanation of segments.

  Computes a tensor such that
  \\(output_i = \max_j(data_j)\\) where `max` is over `j` such
  that `segment_ids[j] == i`.

  If the max is empty for a given segment ID `i`, `output[i] = 0`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt>
  </div>

  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
    segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A 1-D tensor whose size is equal to the size of `data`'s
      first dimension.  Values should be sorted and can be repeated.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context
  # Graph mode (no eager context yet, or eager disabled): build a
  # "SegmentMax" node in the current graph and record it for gradients.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "SegmentMax", data=data, segment_ids=segment_ids, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"))
    _execute.record_gradient(
      "SegmentMax", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one tensor from the output list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "SegmentMax",
        name, _ctx._post_execution_callbacks, data, segment_ids)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; retry via the slow path.
      return segment_max_eager_fallback(
          data, segment_ids, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C-level error status into a Python exception,
      # appending the op name for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6621 
6622 
def segment_max_eager_fallback(data, segment_ids, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function segment_max
  """
  _ctx = ctx if ctx else _context.context()
  # Infer "T" from data and "Tindices" from segment_ids independently.
  _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
  _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
  _inputs_flat = [data, segment_ids]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"SegmentMax", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SegmentMax", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one tensor.
  _result, = _result
  return _result
6638 
6639 
@tf_export('math.segment_mean', 'segment_mean')
@deprecated_endpoints('segment_mean')
def segment_mean(data, segment_ids, name=None):
  r"""Computes the mean along segments of a tensor.

  Read
  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
  for an explanation of segments.

  Computes a tensor such that
  \\(output_i = \frac{\sum_j data_j}{N}\\) where `mean` is
  over `j` such that `segment_ids[j] == i` and `N` is the total number of
  values summed.

  If the mean is empty for a given segment ID `i`, `output[i] = 0`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt>
  </div>

  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
    segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A 1-D tensor whose size is equal to the size of `data`'s
      first dimension.  Values should be sorted and can be repeated.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context
  # Graph mode (no eager context yet, or eager disabled): build a
  # "SegmentMean" node in the current graph and record it for gradients.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "SegmentMean", data=data, segment_ids=segment_ids, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"))
    _execute.record_gradient(
      "SegmentMean", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one tensor from the output list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "SegmentMean",
        name, _ctx._post_execution_callbacks, data, segment_ids)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; retry via the slow path.
      return segment_mean_eager_fallback(
          data, segment_ids, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C-level error status into a Python exception,
      # appending the op name for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6697 
6698 
def segment_mean_eager_fallback(data, segment_ids, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function segment_mean
  """
  _ctx = ctx if ctx else _context.context()
  # Infer "T" from data and "Tindices" from segment_ids independently.
  _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
  _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
  _inputs_flat = [data, segment_ids]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"SegmentMean", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SegmentMean", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one tensor.
  _result, = _result
  return _result
6714 
6715 
@tf_export('math.segment_min', 'segment_min')
@deprecated_endpoints('segment_min')
def segment_min(data, segment_ids, name=None):
  r"""Computes the minimum along segments of a tensor.

  Read
  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
  for an explanation of segments.

  Computes a tensor such that
  \\(output_i = \min_j(data_j)\\) where `min` is over `j` such
  that `segment_ids[j] == i`.

  If the min is empty for a given segment ID `i`, `output[i] = 0`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt>
  </div>

  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
    segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A 1-D tensor whose size is equal to the size of `data`'s
      first dimension.  Values should be sorted and can be repeated.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context
  # Graph mode (no eager context yet, or eager disabled): build a
  # "SegmentMin" node in the current graph and record it for gradients.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "SegmentMin", data=data, segment_ids=segment_ids, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"))
    _execute.record_gradient(
      "SegmentMin", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one tensor from the output list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "SegmentMin",
        name, _ctx._post_execution_callbacks, data, segment_ids)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; retry via the slow path.
      return segment_min_eager_fallback(
          data, segment_ids, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C-level error status into a Python exception,
      # appending the op name for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6772 
6773 
def segment_min_eager_fallback(data, segment_ids, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function segment_min
  """
  _ctx = ctx if ctx else _context.context()
  # Infer "T" from data and "Tindices" from segment_ids independently.
  _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
  _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
  _inputs_flat = [data, segment_ids]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"SegmentMin", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SegmentMin", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one tensor.
  _result, = _result
  return _result
6789 
6790 
@tf_export('math.segment_prod', 'segment_prod')
@deprecated_endpoints('segment_prod')
def segment_prod(data, segment_ids, name=None):
  r"""Computes the product along segments of a tensor.

  Read
  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
  for an explanation of segments.

  Computes a tensor such that
  \\(output_i = \prod_j data_j\\) where the product is over `j` such
  that `segment_ids[j] == i`.

  If the product is empty for a given segment ID `i`, `output[i] = 1`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt>
  </div>

  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
    segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A 1-D tensor whose size is equal to the size of `data`'s
      first dimension.  Values should be sorted and can be repeated.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context
  # Graph mode (no eager context yet, or eager disabled): build a
  # "SegmentProd" node in the current graph and record it for gradients.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "SegmentProd", data=data, segment_ids=segment_ids, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"))
    _execute.record_gradient(
      "SegmentProd", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one tensor from the output list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "SegmentProd",
        name, _ctx._post_execution_callbacks, data, segment_ids)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; retry via the slow path.
      return segment_prod_eager_fallback(
          data, segment_ids, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C-level error status into a Python exception,
      # appending the op name for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6847 
6848 
def segment_prod_eager_fallback(data, segment_ids, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function segment_prod
  """
  _ctx = ctx if ctx else _context.context()
  # Infer "T" from data and "Tindices" from segment_ids independently.
  _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
  _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
  _inputs_flat = [data, segment_ids]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"SegmentProd", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SegmentProd", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one tensor.
  _result, = _result
  return _result
6864 
6865 
@tf_export('math.segment_sum', 'segment_sum')
@deprecated_endpoints('segment_sum')
def segment_sum(data, segment_ids, name=None):
  r"""Computes the sum along segments of a tensor.

  Read
  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
  for an explanation of segments.

  Computes a tensor such that
  \\(output_i = \sum_j data_j\\) where sum is over `j` such
  that `segment_ids[j] == i`.

  If the sum is empty for a given segment ID `i`, `output[i] = 0`.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt>
  </div>

  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
    segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A 1-D tensor whose size is equal to the size of `data`'s
      first dimension.  Values should be sorted and can be repeated.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context
  # Graph mode (no eager context yet, or eager disabled): build a
  # "SegmentSum" node in the current graph and record it for gradients.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "SegmentSum", data=data, segment_ids=segment_ids, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"))
    _execute.record_gradient(
      "SegmentSum", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one tensor from the output list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "SegmentSum",
        name, _ctx._post_execution_callbacks, data, segment_ids)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; retry via the slow path.
      return segment_sum_eager_fallback(
          data, segment_ids, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C-level error status into a Python exception,
      # appending the op name for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
6922 
6923 
def segment_sum_eager_fallback(data, segment_ids, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function segment_sum
  """
  _ctx = ctx if ctx else _context.context()
  # Infer "T" from data and "Tindices" from segment_ids independently.
  _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
  _attr_Tindices, (segment_ids,) = _execute.args_to_matching_eager([segment_ids], _ctx)
  _inputs_flat = [data, segment_ids]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"SegmentSum", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=_ctx, name=name)
  _execute.record_gradient(
      "SegmentSum", _inputs_flat, _attrs, _result, name)
  # Single-output op: unwrap the one tensor.
  _result, = _result
  return _result
6939 
6940 
def select(condition, x, y, name=None):
  r"""Selects elements from `x` or `y`, depending on `condition`.

  The `x`, and `y` tensors must all have the same shape, and the
  output will also have that shape.

  The `condition` tensor must be a scalar if `x` and `y` are scalars.
  If `x` and `y` are vectors or higher rank, then `condition` must be either a
  scalar, a vector with size matching the first dimension of `x`, or must have
  the same shape as `x`.

  The `condition` tensor acts as a mask that chooses, based on the value at each
  element, whether the corresponding element / row in the output should be
  taken from `x` (if true) or `y` (if false).

  If `condition` is a vector and `x` and `y` are higher rank matrices, then
  it chooses which row (outer dimension) to copy from `x` and `y`.
  If `condition` has the same shape as `x` and `y`, then it chooses which
  element to copy from `x` and `y`.

  For example:

  ```python
  # 'condition' tensor is [[True,  False]
  #                        [False, True]]
  # 't' is [[1, 2],
  #         [3, 4]]
  # 'e' is [[5, 6],
  #         [7, 8]]
  select(condition, t, e)  # => [[1, 6], [7, 4]]


  # 'condition' tensor is [True, False]
  # 't' is [[1, 2],
  #         [3, 4]]
  # 'e' is [[5, 6],
  #         [7, 8]]
  select(condition, t, e) ==> [[1, 2],
                               [7, 8]]

  ```

  Args:
    condition: A `Tensor` of type `bool`.
    x:  A `Tensor` which may have the same shape as `condition`.
      If `condition` is rank 1, `x` may have higher rank,
      but its first dimension must match the size of `condition`.
    y:  A `Tensor` with the same type and shape as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode (no eager context yet, or eager disabled): build a "Select"
  # node. The op's internal input names are t/e, mapped from the public
  # parameters x/y below.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Select", condition=condition, t=x, e=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Select", _inputs_flat, _attrs, _result, name)
    # Single-output op: unwrap the one tensor from the output list.
    _result, = _result
    return _result

  else:
    try:
      # Eager fast path: dispatch directly through the C layer.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Select", name,
        _ctx._post_execution_callbacks, condition, x, y)
      return _result
    except _core._FallbackException:
      # Fast path could not handle these inputs; retry via the slow path.
      return select_eager_fallback(
          condition, x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Translate the C-level error status into a Python exception,
      # appending the op name for context.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7021 
7022 
def select_eager_fallback(condition, x, y, name=None, ctx=None):
  r"""Eager-mode slow path for `select`.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  # Promote x and y to a common dtype; condition is always bool.
  attr_t, (x, y) = _execute.args_to_matching_eager([x, y], eager_ctx)
  condition = _ops.convert_to_tensor(condition, _dtypes.bool)
  flat_inputs = [condition, x, y]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Select", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Select", flat_inputs, op_attrs, outputs, name)
  only_result, = outputs
  return only_result
7039 
7040 
def sigmoid(x, name=None):
  r"""Computes sigmoid of `x` element-wise.

  Specifically, `y = 1 / (1 + exp(-x))`.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode (or context not initialized): build a "Sigmoid" node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Sigmoid", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Sigmoid", _inputs_flat, _attrs, _result, name)
    _result, = _result  # the op has exactly one output
    return _result

  else:
    # Eager mode: try the fast C++ execution path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Sigmoid",
        name, _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return sigmoid_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7080 
7081 
def sigmoid_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for `sigmoid`.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Sigmoid", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Sigmoid", flat_inputs, op_attrs, outputs, name)
  only_result, = outputs
  return only_result
7096 
7097 
def sigmoid_grad(y, dy, name=None):
  r"""Computes the gradient of the sigmoid of `x` wrt its input.

  Specifically, `grad = dy * y * (1 - y)`, where `y = sigmoid(x)`, and
  `dy` is the corresponding input gradient.

  Args:
    y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    dy: A `Tensor`. Must have the same type as `y`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `y`.
  """
  _ctx = _context._context
  # Graph mode (or context not initialized): build a "SigmoidGrad" node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "SigmoidGrad", y=y, dy=dy, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "SigmoidGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result  # the op has exactly one output
    return _result

  else:
    # Eager mode: try the fast C++ execution path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "SigmoidGrad",
        name, _ctx._post_execution_callbacks, y, dy)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return sigmoid_grad_eager_fallback(
          y, dy, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7139 
7140 
def sigmoid_grad_eager_fallback(y, dy, name=None, ctx=None):
  r"""Eager-mode slow path for `sigmoid_grad`.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  # Promote y and dy to a common dtype.
  attr_t, (y, dy) = _execute.args_to_matching_eager([y, dy], eager_ctx)
  flat_inputs = [y, dy]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"SigmoidGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SigmoidGrad", flat_inputs, op_attrs, outputs, name)
  only_result, = outputs
  return only_result
7156 
7157 
def sign(x, name=None):
  r"""Returns an element-wise indication of the sign of a number.

  `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.

  For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode (or context not initialized): build a "Sign" node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Sign", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Sign", _inputs_flat, _attrs, _result, name)
    _result, = _result  # the op has exactly one output
    return _result

  else:
    # Eager mode: try the fast C++ execution path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Sign", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return sign_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7199 
7200 
def sign_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for `sign`.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Sign", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Sign", flat_inputs, op_attrs, outputs, name)
  only_result, = outputs
  return only_result
7215 
7216 
@tf_export('math.sin', 'sin')
@deprecated_endpoints('sin')
def sin(x, name=None):
  r"""Computes sin of x element-wise.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode (or context not initialized): build a "Sin" node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Sin", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Sin", _inputs_flat, _attrs, _result, name)
    _result, = _result  # the op has exactly one output
    return _result

  else:
    # Eager mode: try the fast C++ execution path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Sin", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return sin_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7256 
7257 
def sin_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for `sin`.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Sin", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Sin", flat_inputs, op_attrs, outputs, name)
  only_result, = outputs
  return only_result
7272 
7273 
@tf_export('math.sinh', 'sinh')
@deprecated_endpoints('sinh')
def sinh(x, name=None):
  r"""Computes hyperbolic sine of x element-wise.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  # Graph mode (or context not initialized): build a "Sinh" node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "Sinh", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Sinh", _inputs_flat, _attrs, _result, name)
    _result, = _result  # the op has exactly one output
    return _result

  else:
    # Eager mode: try the fast C++ execution path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Sinh", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return sinh_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7313 
7314 
def sinh_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for `sinh`.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], eager_ctx)
  flat_inputs = [x]
  op_attrs = ("T", attr_t)
  outputs = _execute.execute(b"Sinh", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Sinh", flat_inputs, op_attrs, outputs, name)
  only_result, = outputs
  return only_result
7329 
7330 
def sparse_mat_mul(a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None):
  r"""Multiply matrix "a" by matrix "b".

  The inputs must be two-dimensional matrices and the inner dimension of "a" must
  match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not
  `SparseTensor`s.  This op is optimized for the case where at least one of "a" or
  "b" is sparse, in the sense that they have a large proportion of zero values.
  The breakeven for using this versus a dense matrix multiply on one platform was
  30% zero values in the sparse matrix.

  The gradient computation of this operation will only take advantage of sparsity
  in the input gradient when that gradient comes from a Relu.

  Args:
    a: A `Tensor`. Must be one of the following types: `float32`, `bfloat16`.
    b: A `Tensor`. Must be one of the following types: `float32`, `bfloat16`.
    transpose_a: An optional `bool`. Defaults to `False`.
    transpose_b: An optional `bool`. Defaults to `False`.
    a_is_sparse: An optional `bool`. Defaults to `False`.
    b_is_sparse: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32`.
  """
  _ctx = _context._context
  # Graph mode (or context not initialized): build a "SparseMatMul" node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Normalize each optional attribute: None becomes False, then coerce
    # to a genuine bool for the op definition.
    if transpose_a is None:
      transpose_a = False
    transpose_a = _execute.make_bool(transpose_a, "transpose_a")
    if transpose_b is None:
      transpose_b = False
    transpose_b = _execute.make_bool(transpose_b, "transpose_b")
    if a_is_sparse is None:
      a_is_sparse = False
    a_is_sparse = _execute.make_bool(a_is_sparse, "a_is_sparse")
    if b_is_sparse is None:
      b_is_sparse = False
    b_is_sparse = _execute.make_bool(b_is_sparse, "b_is_sparse")
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseMatMul", a=a, b=b, transpose_a=transpose_a,
        transpose_b=transpose_b, a_is_sparse=a_is_sparse,
        b_is_sparse=b_is_sparse, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("transpose_a", _op.get_attr("transpose_a"), "transpose_b",
              _op.get_attr("transpose_b"), "a_is_sparse",
              _op.get_attr("a_is_sparse"), "b_is_sparse",
              _op.get_attr("b_is_sparse"), "Ta", _op.get_attr("Ta"), "Tb",
              _op.get_attr("Tb"))
    _execute.record_gradient(
      "SparseMatMul", _inputs_flat, _attrs, _result, name)
    _result, = _result  # the op has exactly one output
    return _result

  else:
    # Eager mode: try the fast C++ execution path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "SparseMatMul",
        name, _ctx._post_execution_callbacks, a, b, "transpose_a",
        transpose_a, "transpose_b", transpose_b, "a_is_sparse", a_is_sparse,
        "b_is_sparse", b_is_sparse)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return sparse_mat_mul_eager_fallback(
          a, b, transpose_a=transpose_a, transpose_b=transpose_b,
          a_is_sparse=a_is_sparse, b_is_sparse=b_is_sparse, name=name,
          ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7405 
7406 
def sparse_mat_mul_eager_fallback(a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None, ctx=None):
  r"""Eager-mode slow path for `sparse_mat_mul`.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  # Normalize the optional bool attributes: None defaults to False, then
  # each value is coerced to a genuine bool.
  transpose_a = _execute.make_bool(
      False if transpose_a is None else transpose_a, "transpose_a")
  transpose_b = _execute.make_bool(
      False if transpose_b is None else transpose_b, "transpose_b")
  a_is_sparse = _execute.make_bool(
      False if a_is_sparse is None else a_is_sparse, "a_is_sparse")
  b_is_sparse = _execute.make_bool(
      False if b_is_sparse is None else b_is_sparse, "b_is_sparse")
  # a and b may carry distinct dtypes (Ta / Tb), each defaulting to float32.
  attr_ta, (a,) = _execute.args_to_matching_eager([a], eager_ctx, _dtypes.float32)
  attr_tb, (b,) = _execute.args_to_matching_eager([b], eager_ctx, _dtypes.float32)
  flat_inputs = [a, b]
  op_attrs = ("transpose_a", transpose_a, "transpose_b", transpose_b,
              "a_is_sparse", a_is_sparse, "b_is_sparse", b_is_sparse,
              "Ta", attr_ta, "Tb", attr_tb)
  outputs = _execute.execute(b"SparseMatMul", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SparseMatMul", flat_inputs, op_attrs, outputs, name)
  only_result, = outputs
  return only_result
7436 
7437 
def sparse_segment_mean(data, indices, segment_ids, name=None):
  r"""Computes the mean along sparse segments of a tensor.

  Read
  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
  for an explanation of segments.

  Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first
  dimension, selecting a subset of dimension 0, specified by `indices`.

  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A 1-D tensor. Has same rank as `segment_ids`.
    segment_ids: A `Tensor` of type `int32`.
      A 1-D tensor. Values should be sorted and can be repeated.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context
  # Graph mode (or context not initialized): build a "SparseSegmentMean" node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseSegmentMean", data=data, indices=indices,
        segment_ids=segment_ids, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "SparseSegmentMean", _inputs_flat, _attrs, _result, name)
    _result, = _result  # the op has exactly one output
    return _result

  else:
    # Eager mode: try the fast C++ execution path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseSegmentMean", name, _ctx._post_execution_callbacks, data,
        indices, segment_ids)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return sparse_segment_mean_eager_fallback(
          data, indices, segment_ids, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7488 
7489 
def sparse_segment_mean_eager_fallback(data, indices, segment_ids, name=None, ctx=None):
  r"""Eager-mode slow path for `sparse_segment_mean`.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (data,) = _execute.args_to_matching_eager([data], eager_ctx)
  # indices may be int32 or int64 (Tidx), defaulting to int32.
  attr_tidx, (indices,) = _execute.args_to_matching_eager([indices], eager_ctx, _dtypes.int32)
  segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
  flat_inputs = [data, indices, segment_ids]
  op_attrs = ("T", attr_t, "Tidx", attr_tidx)
  outputs = _execute.execute(b"SparseSegmentMean", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SparseSegmentMean", flat_inputs, op_attrs, outputs, name)
  only_result, = outputs
  return only_result
7506 
7507 
def sparse_segment_mean_grad(grad, indices, segment_ids, output_dim0, name=None):
  r"""Computes gradients for SparseSegmentMean.

  Returns tensor "output" with same shape as grad, except for dimension 0 whose
  value is output_dim0.

  Args:
    grad: A `Tensor`. Must be one of the following types: `float32`, `float64`.
      gradient propagated to the SparseSegmentMean op.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      indices passed to the corresponding SparseSegmentMean op.
    segment_ids: A `Tensor` of type `int32`.
      segment_ids passed to the corresponding SparseSegmentMean op.
    output_dim0: A `Tensor` of type `int32`.
      dimension 0 of "data" passed to SparseSegmentMean op.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `grad`.
  """
  _ctx = _context._context
  # Graph mode (or context not initialized): build the grad node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseSegmentMeanGrad", grad=grad, indices=indices,
        segment_ids=segment_ids, output_dim0=output_dim0, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "SparseSegmentMeanGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result  # the op has exactly one output
    return _result

  else:
    # Eager mode: try the fast C++ execution path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseSegmentMeanGrad", name, _ctx._post_execution_callbacks, grad,
        indices, segment_ids, output_dim0)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return sparse_segment_mean_grad_eager_fallback(
          grad, indices, segment_ids, output_dim0, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7557 
7558 
def sparse_segment_mean_grad_eager_fallback(grad, indices, segment_ids, output_dim0, name=None, ctx=None):
  r"""Eager-mode slow path for `sparse_segment_mean_grad`.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (grad,) = _execute.args_to_matching_eager([grad], eager_ctx)
  # indices may be int32 or int64 (Tidx), defaulting to int32.
  attr_tidx, (indices,) = _execute.args_to_matching_eager([indices], eager_ctx, _dtypes.int32)
  segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
  output_dim0 = _ops.convert_to_tensor(output_dim0, _dtypes.int32)
  flat_inputs = [grad, indices, segment_ids, output_dim0]
  op_attrs = ("T", attr_t, "Tidx", attr_tidx)
  outputs = _execute.execute(b"SparseSegmentMeanGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SparseSegmentMeanGrad", flat_inputs, op_attrs, outputs, name)
  only_result, = outputs
  return only_result
7576 
7577 
def sparse_segment_mean_with_num_segments(data, indices, segment_ids, num_segments, name=None):
  r"""Computes the mean along sparse segments of a tensor.

  Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is
  missing, the `output` tensor at that position will be zeroed.

  Read
  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
  for an explanation of segments.

  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A 1-D tensor. Has same rank as `segment_ids`.
    segment_ids: A `Tensor` of type `int32`.
      A 1-D tensor. Values should be sorted and can be repeated.
    num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Should equal the number of distinct segment IDs.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context
  # Graph mode (or context not initialized): build the op node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseSegmentMeanWithNumSegments", data=data, indices=indices,
        segment_ids=segment_ids, num_segments=num_segments, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"),
              "Tnumsegments", _op.get_attr("Tnumsegments"))
    _execute.record_gradient(
      "SparseSegmentMeanWithNumSegments", _inputs_flat, _attrs, _result, name)
    _result, = _result  # the op has exactly one output
    return _result

  else:
    # Eager mode: try the fast C++ execution path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseSegmentMeanWithNumSegments", name,
        _ctx._post_execution_callbacks, data, indices, segment_ids,
        num_segments)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return sparse_segment_mean_with_num_segments_eager_fallback(
          data, indices, segment_ids, num_segments, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7632 
7633 
def sparse_segment_mean_with_num_segments_eager_fallback(data, indices, segment_ids, num_segments, name=None, ctx=None):
  r"""Eager-mode slow path for `sparse_segment_mean_with_num_segments`.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (data,) = _execute.args_to_matching_eager([data], eager_ctx)
  # indices and num_segments may each be int32 or int64, defaulting to int32.
  attr_tidx, (indices,) = _execute.args_to_matching_eager([indices], eager_ctx, _dtypes.int32)
  attr_tnumsegments, (num_segments,) = _execute.args_to_matching_eager([num_segments], eager_ctx, _dtypes.int32)
  segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
  flat_inputs = [data, indices, segment_ids, num_segments]
  op_attrs = ("T", attr_t, "Tidx", attr_tidx, "Tnumsegments",
              attr_tnumsegments)
  outputs = _execute.execute(b"SparseSegmentMeanWithNumSegments", 1,
                             inputs=flat_inputs, attrs=op_attrs,
                             ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SparseSegmentMeanWithNumSegments", flat_inputs, op_attrs, outputs, name)
  only_result, = outputs
  return only_result
7653 
7654 
def sparse_segment_sqrt_n(data, indices, segment_ids, name=None):
  r"""Computes the sum along sparse segments of a tensor divided by the sqrt of N.

  N is the size of the segment being reduced.

  Read
  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
  for an explanation of segments.

  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A 1-D tensor. Has same rank as `segment_ids`.
    segment_ids: A `Tensor` of type `int32`.
      A 1-D tensor. Values should be sorted and can be repeated.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context
  # Graph mode (or context not initialized): build a "SparseSegmentSqrtN" node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseSegmentSqrtN", data=data, indices=indices,
        segment_ids=segment_ids, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "SparseSegmentSqrtN", _inputs_flat, _attrs, _result, name)
    _result, = _result  # the op has exactly one output
    return _result

  else:
    # Eager mode: try the fast C++ execution path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseSegmentSqrtN", name, _ctx._post_execution_callbacks, data,
        indices, segment_ids)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return sparse_segment_sqrt_n_eager_fallback(
          data, indices, segment_ids, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7704 
7705 
def sparse_segment_sqrt_n_eager_fallback(data, indices, segment_ids, name=None, ctx=None):
  r"""Eager-mode slow path for `sparse_segment_sqrt_n`.

  Invoked when the fast C++ execution path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  attr_t, (data,) = _execute.args_to_matching_eager([data], eager_ctx)
  # indices may be int32 or int64 (Tidx), defaulting to int32.
  attr_tidx, (indices,) = _execute.args_to_matching_eager([indices], eager_ctx, _dtypes.int32)
  segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
  flat_inputs = [data, indices, segment_ids]
  op_attrs = ("T", attr_t, "Tidx", attr_tidx)
  outputs = _execute.execute(b"SparseSegmentSqrtN", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "SparseSegmentSqrtN", flat_inputs, op_attrs, outputs, name)
  only_result, = outputs
  return only_result
7722 
7723 
def sparse_segment_sqrt_n_grad(grad, indices, segment_ids, output_dim0, name=None):
  r"""Computes gradients for SparseSegmentSqrtN.

  Returns tensor "output" with same shape as grad, except for dimension 0 whose
  value is output_dim0.

  Args:
    grad: A `Tensor`. Must be one of the following types: `float32`, `float64`.
      gradient propagated to the SparseSegmentSqrtN op.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      indices passed to the corresponding SparseSegmentSqrtN op.
    segment_ids: A `Tensor` of type `int32`.
      segment_ids passed to the corresponding SparseSegmentSqrtN op.
    output_dim0: A `Tensor` of type `int32`.
      dimension 0 of "data" passed to SparseSegmentSqrtN op.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `grad`.
  """
  _ctx = _context._context
  # Graph mode (or context not initialized): build the grad node.
  if _ctx is None or not _ctx._eager_context.is_eager:
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseSegmentSqrtNGrad", grad=grad, indices=indices,
        segment_ids=segment_ids, output_dim0=output_dim0, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "SparseSegmentSqrtNGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result  # the op has exactly one output
    return _result

  else:
    # Eager mode: try the fast C++ execution path first.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseSegmentSqrtNGrad", name, _ctx._post_execution_callbacks, grad,
        indices, segment_ids, output_dim0)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; use the Python slow path.
      return sparse_segment_sqrt_n_grad_eager_fallback(
          grad, indices, segment_ids, output_dim0, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the C++ status as a Python exception, tagging the op name.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7773 
7774 
def sparse_segment_sqrt_n_grad_eager_fallback(grad, indices, segment_ids, output_dim0, name=None, ctx=None):
  r"""Slow-path eager execution of the SparseSegmentSqrtNGrad op.

  Invoked when the fast C++ path for sparse_segment_sqrt_n_grad cannot
  handle the given inputs.
  """
  _ctx = ctx or _context.context()
  _attr_T, (grad,) = _execute.args_to_matching_eager([grad], _ctx)
  _attr_Tidx, (indices,) = _execute.args_to_matching_eager(
      [indices], _ctx, _dtypes.int32)
  segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
  output_dim0 = _ops.convert_to_tensor(output_dim0, _dtypes.int32)
  flat_inputs = [grad, indices, segment_ids, output_dim0]
  op_attrs = ("T", _attr_T, "Tidx", _attr_Tidx)
  outputs = _execute.execute(b"SparseSegmentSqrtNGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("SparseSegmentSqrtNGrad", flat_inputs, op_attrs,
                           outputs, name)
  output, = outputs
  return output
7793 
7794 
def sparse_segment_sqrt_n_with_num_segments(data, indices, segment_ids, num_segments, name=None):
  r"""Computes the sum along sparse segments of a tensor divided by the sqrt of N.

  N is the size of the segment being reduced.

  Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is
  missing, the `output` tensor at that position will be zeroed.

  Read
  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
  for an explanation of segments.

  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A 1-D tensor. Has same rank as `segment_ids`.
    segment_ids: A `Tensor` of type `int32`.
      A 1-D tensor. Values should be sorted and can be repeated.
    num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Should equal the number of distinct segment IDs.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a SparseSegmentSqrtNWithNumSegments node to the graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseSegmentSqrtNWithNumSegments", data=data, indices=indices,
        segment_ids=segment_ids, num_segments=num_segments, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"),
              "Tnumsegments", _op.get_attr("Tnumsegments"))
    _execute.record_gradient(
      "SparseSegmentSqrtNWithNumSegments", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the fast C++ execution path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseSegmentSqrtNWithNumSegments", name,
        _ctx._post_execution_callbacks, data, indices, segment_ids,
        num_segments)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower Python path.
      return sparse_segment_sqrt_n_with_num_segments_eager_fallback(
          data, indices, segment_ids, num_segments, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the op failure as the matching TF Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7851 
7852 
def sparse_segment_sqrt_n_with_num_segments_eager_fallback(data, indices, segment_ids, num_segments, name=None, ctx=None):
  r"""Slow-path eager execution of the SparseSegmentSqrtNWithNumSegments op.

  Invoked when the fast C++ path for
  sparse_segment_sqrt_n_with_num_segments cannot handle the given inputs.
  """
  _ctx = ctx or _context.context()
  _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
  _attr_Tidx, (indices,) = _execute.args_to_matching_eager(
      [indices], _ctx, _dtypes.int32)
  _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager(
      [num_segments], _ctx, _dtypes.int32)
  segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
  flat_inputs = [data, indices, segment_ids, num_segments]
  op_attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tnumsegments",
              _attr_Tnumsegments)
  outputs = _execute.execute(b"SparseSegmentSqrtNWithNumSegments", 1,
                             inputs=flat_inputs, attrs=op_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient("SparseSegmentSqrtNWithNumSegments", flat_inputs,
                           op_attrs, outputs, name)
  output, = outputs
  return output
7872 
7873 
def sparse_segment_sum(data, indices, segment_ids, name=None):
  r"""Computes the sum along sparse segments of a tensor.

  Read
  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
  for an explanation of segments.

  Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first
  dimension, selecting a subset of dimension 0, specified by `indices`.

  For example:

  ```python
  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

  # Select two rows, one segment.
  tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
  # => [[0 0 0 0]]

  # Select two rows, two segments.
  tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
  # => [[ 1  2  3  4]
  #     [-1 -2 -3 -4]]

  # Select all rows, two segments.
  tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
  # => [[0 0 0 0]
  #     [5 6 7 8]]

  # Which is equivalent to:
  tf.segment_sum(c, tf.constant([0, 0, 1]))
  ```

  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A 1-D tensor. Has same rank as `segment_ids`.
    segment_ids: A `Tensor` of type `int32`.
      A 1-D tensor. Values should be sorted and can be repeated.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a SparseSegmentSum node to the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseSegmentSum", data=data, indices=indices,
        segment_ids=segment_ids, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "SparseSegmentSum", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the fast C++ execution path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseSegmentSum", name, _ctx._post_execution_callbacks, data,
        indices, segment_ids)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower Python path.
      return sparse_segment_sum_eager_fallback(
          data, indices, segment_ids, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the op failure as the matching TF Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
7947 
7948 
def sparse_segment_sum_eager_fallback(data, indices, segment_ids, name=None, ctx=None):
  r"""Slow-path eager execution of the SparseSegmentSum op.

  Invoked when the fast C++ path for sparse_segment_sum cannot handle
  the given inputs.
  """
  _ctx = ctx or _context.context()
  _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
  _attr_Tidx, (indices,) = _execute.args_to_matching_eager(
      [indices], _ctx, _dtypes.int32)
  segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
  flat_inputs = [data, indices, segment_ids]
  op_attrs = ("T", _attr_T, "Tidx", _attr_Tidx)
  outputs = _execute.execute(b"SparseSegmentSum", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("SparseSegmentSum", flat_inputs, op_attrs,
                           outputs, name)
  output, = outputs
  return output
7965 
7966 
def sparse_segment_sum_with_num_segments(data, indices, segment_ids, num_segments, name=None):
  r"""Computes the sum along sparse segments of a tensor.

  Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is
  missing, the `output` tensor at that position will be zeroed.

  Read
  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
  for an explanation of segments.

  For example:

  ```python
  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

  tf.sparse_segment_sum_with_num_segments(
      c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)
  # => [[0 0 0 0]
  #     [0 0 0 0]
  #     [0 0 0 0]]

  tf.sparse_segment_sum_with_num_segments(c,
                                          tf.constant([0, 1]),
                                          tf.constant([0, 2]),
                                          num_segments=4)
  # => [[ 1  2  3  4]
  #     [ 0  0  0  0]
  #     [-1 -2 -3 -4]
  #     [ 0  0  0  0]]
  ```

  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
    indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A 1-D tensor. Has same rank as `segment_ids`.
    segment_ids: A `Tensor` of type `int32`.
      A 1-D tensor. Values should be sorted and can be repeated.
    num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      Should equal the number of distinct segment IDs.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a SparseSegmentSumWithNumSegments node to the graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SparseSegmentSumWithNumSegments", data=data, indices=indices,
        segment_ids=segment_ids, num_segments=num_segments, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tidx", _op.get_attr("Tidx"),
              "Tnumsegments", _op.get_attr("Tnumsegments"))
    _execute.record_gradient(
      "SparseSegmentSumWithNumSegments", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the fast C++ execution path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SparseSegmentSumWithNumSegments", name,
        _ctx._post_execution_callbacks, data, indices, segment_ids,
        num_segments)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower Python path.
      return sparse_segment_sum_with_num_segments_eager_fallback(
          data, indices, segment_ids, num_segments, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the op failure as the matching TF Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
8042 
8043 
def sparse_segment_sum_with_num_segments_eager_fallback(data, indices, segment_ids, num_segments, name=None, ctx=None):
  r"""Slow-path eager execution of the SparseSegmentSumWithNumSegments op.

  Invoked when the fast C++ path for
  sparse_segment_sum_with_num_segments cannot handle the given inputs.
  """
  _ctx = ctx or _context.context()
  _attr_T, (data,) = _execute.args_to_matching_eager([data], _ctx)
  _attr_Tidx, (indices,) = _execute.args_to_matching_eager(
      [indices], _ctx, _dtypes.int32)
  _attr_Tnumsegments, (num_segments,) = _execute.args_to_matching_eager(
      [num_segments], _ctx, _dtypes.int32)
  segment_ids = _ops.convert_to_tensor(segment_ids, _dtypes.int32)
  flat_inputs = [data, indices, segment_ids, num_segments]
  op_attrs = ("T", _attr_T, "Tidx", _attr_Tidx, "Tnumsegments",
              _attr_Tnumsegments)
  outputs = _execute.execute(b"SparseSegmentSumWithNumSegments", 1,
                             inputs=flat_inputs, attrs=op_attrs, ctx=_ctx,
                             name=name)
  _execute.record_gradient("SparseSegmentSumWithNumSegments", flat_inputs,
                           op_attrs, outputs, name)
  output, = outputs
  return output
8063 
8064 
def sqrt(x, name=None):
  r"""Computes square root of x element-wise.

  I.e., \\(y = \sqrt{x} = x^{1/2}\\).

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a Sqrt node to the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Sqrt", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Sqrt", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the fast C++ execution path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Sqrt", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower Python path.
      return sqrt_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the op failure as the matching TF Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
8104 
8105 
def sqrt_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager execution of the Sqrt op.

  Invoked when the fast C++ path for sqrt cannot handle the given inputs.
  """
  _ctx = ctx or _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  flat_inputs = [x]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"Sqrt", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Sqrt", flat_inputs, op_attrs, outputs, name)
  output, = outputs
  return output
8120 
8121 
def sqrt_grad(y, dy, name=None):
  r"""Computes the gradient for the sqrt of `x` wrt its input.

  Specifically, `grad = dy * 0.5 / y`, where `y = sqrt(x)`, and `dy`
  is the corresponding input gradient.

  Args:
    y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    dy: A `Tensor`. Must have the same type as `y`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `y`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a SqrtGrad node to the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SqrtGrad", y=y, dy=dy, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "SqrtGrad", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the fast C++ execution path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "SqrtGrad",
        name, _ctx._post_execution_callbacks, y, dy)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower Python path.
      return sqrt_grad_eager_fallback(
          y, dy, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the op failure as the matching TF Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
8163 
8164 
def sqrt_grad_eager_fallback(y, dy, name=None, ctx=None):
  r"""Slow-path eager execution of the SqrtGrad op.

  Invoked when the fast C++ path for sqrt_grad cannot handle the given
  inputs.
  """
  _ctx = ctx or _context.context()
  _attr_T, (y, dy) = _execute.args_to_matching_eager([y, dy], _ctx)
  flat_inputs = [y, dy]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"SqrtGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("SqrtGrad", flat_inputs, op_attrs, outputs, name)
  output, = outputs
  return output
8180 
8181 
def square(x, name=None):
  r"""Computes square of x element-wise.

  I.e., \\(y = x * x = x^2\\).

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a Square node to the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Square", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Square", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the fast C++ execution path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Square", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower Python path.
      return square_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the op failure as the matching TF Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
8221 
8222 
def square_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager execution of the Square op.

  Invoked when the fast C++ path for square cannot handle the given
  inputs.
  """
  _ctx = ctx or _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  flat_inputs = [x]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"Square", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("Square", flat_inputs, op_attrs, outputs, name)
  output, = outputs
  return output
8237 
8238 
@tf_export('math.squared_difference', 'squared_difference')
@deprecated_endpoints('squared_difference')
def squared_difference(x, y, name=None):
  r"""Returns (x - y)(x - y) element-wise.

  *NOTE*: `math.squared_difference` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a SquaredDifference node to the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "SquaredDifference", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "SquaredDifference", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the fast C++ execution path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "SquaredDifference", name, _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower Python path.
      return squared_difference_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the op failure as the matching TF Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
8282 
8283 
def squared_difference_eager_fallback(x, y, name=None, ctx=None):
  r"""Slow-path eager execution of the SquaredDifference op.

  Invoked when the fast C++ path for squared_difference cannot handle
  the given inputs.
  """
  _ctx = ctx or _context.context()
  _attr_T, (x, y) = _execute.args_to_matching_eager([x, y], _ctx)
  flat_inputs = [x, y]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"SquaredDifference", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=_ctx, name=name)
  _execute.record_gradient("SquaredDifference", flat_inputs, op_attrs,
                           outputs, name)
  output, = outputs
  return output
8299 
8300 
def sub(x, y, name=None):
  r"""Returns x - y element-wise.

  *NOTE*: `Subtract` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a Sub node to the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Sub", x=x, y=y, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Sub", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the fast C++ execution path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Sub", name,
        _ctx._post_execution_callbacks, x, y)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower Python path.
      return sub_eager_fallback(
          x, y, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the op failure as the matching TF Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
8342 
8343 
def sub_eager_fallback(x, y, name=None, ctx=None):
  r"""Slow-path eager execution of the Sub op.

  Invoked when the fast C++ path for sub cannot handle the given inputs.
  """
  _ctx = ctx or _context.context()
  _attr_T, (x, y) = _execute.args_to_matching_eager([x, y], _ctx)
  flat_inputs = [x, y]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"Sub", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Sub", flat_inputs, op_attrs, outputs, name)
  output, = outputs
  return output
8359 
8360 
def _sum(input, axis, keep_dims=False, name=None):
  r"""Computes the sum of elements across dimensions of a tensor.

  Reduces `input` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `axis`. If `keep_dims` is true, the reduced dimensions are
  retained with length 1.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      The tensor to reduce.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      The dimensions to reduce. Must be in the range
      `[-rank(input), rank(input))`.
    keep_dims: An optional `bool`. Defaults to `False`.
      If true, retain reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: normalize keep_dims, then add a Sum node to the graph.
    if keep_dims is None:
      keep_dims = False
    keep_dims = _execute.make_bool(keep_dims, "keep_dims")
    _, _, _op = _op_def_lib._apply_op_helper(
        "Sum", input=input, reduction_indices=axis, keep_dims=keep_dims,
        name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("keep_dims", _op.get_attr("keep_dims"), "T", _op.get_attr("T"),
              "Tidx", _op.get_attr("Tidx"))
    _execute.record_gradient(
      "Sum", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the fast C++ execution path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Sum", name,
        _ctx._post_execution_callbacks, input, axis, "keep_dims", keep_dims)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower Python path.
      return _sum_eager_fallback(
          input, axis, keep_dims=keep_dims, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the op failure as the matching TF Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
8414 
8415 
def _sum_eager_fallback(input, axis, keep_dims=False, name=None, ctx=None):
  r"""Slow-path eager execution of the Sum op.

  Invoked when the fast C++ path for _sum cannot handle the given inputs.
  """
  _ctx = ctx or _context.context()
  # Treat a None keep_dims the same as the documented default of False.
  keep_dims = _execute.make_bool(
      False if keep_dims is None else keep_dims, "keep_dims")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], _ctx)
  _attr_Tidx, (axis,) = _execute.args_to_matching_eager(
      [axis], _ctx, _dtypes.int32)
  flat_inputs = [input, axis]
  op_attrs = ("keep_dims", keep_dims, "T", _attr_T, "Tidx", _attr_Tidx)
  outputs = _execute.execute(b"Sum", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Sum", flat_inputs, op_attrs, outputs, name)
  output, = outputs
  return output
8434 
8435 
@tf_export('math.tan', 'tan')
@deprecated_endpoints('tan')
def tan(x, name=None):
  r"""Computes tan of x element-wise.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: add a Tan node to the current graph.
    _, _, _op = _op_def_lib._apply_op_helper(
        "Tan", x=x, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"))
    _execute.record_gradient(
      "Tan", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    try:
      # Eager mode: try the fast C++ execution path first.
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name, "Tan", name,
        _ctx._post_execution_callbacks, x)
      return _result
    except _core._FallbackException:
      # Fast path rejected the inputs; retry via the slower Python path.
      return tan_eager_fallback(
          x, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Re-raise the op failure as the matching TF Python exception,
      # tagging the message with the op name when one was given.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
8475 
8476 
def tan_eager_fallback(x, name=None, ctx=None):
  r"""Slow-path eager execution of the Tan op.

  Invoked when the fast C++ path for tan cannot handle the given inputs.
  """
  _ctx = ctx or _context.context()
  _attr_T, (x,) = _execute.args_to_matching_eager([x], _ctx)
  flat_inputs = [x]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"Tan", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=_ctx, name=name)
  _execute.record_gradient("Tan", flat_inputs, op_attrs, outputs, name)
  output, = outputs
  return output
8491 
8492 
def tanh(x, name=None):
  r"""Computes hyperbolic tangent of `x` element-wise.

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    # Eager mode: C fast path, with Python fallback on _FallbackException.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          ctx._context_handle, ctx._eager_context.device_name, "Tanh", name,
          ctx._post_execution_callbacks, x)
    except _core._FallbackException:
      return tanh_eager_fallback(x, name=name, ctx=ctx)
    except _core._NotOkStatusException as e:
      msg = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, msg), None)
  # Graph mode: build the op and record a gradient entry.
  _, _, op = _op_def_lib._apply_op_helper("Tanh", x=x, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"))
  _execute.record_gradient("Tanh", op.inputs, attrs, outputs, name)
  result, = outputs
  return result
8530 
8531 
def tanh_eager_fallback(x, name=None, ctx=None):
  r"""Eager-mode slow path for tanh.

  Invoked when the C fast path raises `_FallbackException`.
  """
  ctx = ctx if ctx else _context.context()
  attr_t, (x,) = _execute.args_to_matching_eager([x], ctx)
  flat = [x]
  attrs = ("T", attr_t)
  outputs = _execute.execute(b"Tanh", 1, inputs=flat, attrs=attrs, ctx=ctx,
                             name=name)
  _execute.record_gradient("Tanh", flat, attrs, outputs, name)
  result, = outputs
  return result
8546 
8547 
def tanh_grad(y, dy, name=None):
  r"""Computes the gradient for the tanh of `x` wrt its input.

  Specifically, `grad = dy * (1 - y*y)`, where `y = tanh(x)`, and `dy`
  is the corresponding input gradient.

  Args:
    y: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `complex64`, `complex128`.
    dy: A `Tensor`. Must have the same type as `y`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `y`.
  """
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    # Eager mode: C fast path, with Python fallback on _FallbackException.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          ctx._context_handle, ctx._eager_context.device_name, "TanhGrad",
          name, ctx._post_execution_callbacks, y, dy)
    except _core._FallbackException:
      return tanh_grad_eager_fallback(y, dy, name=name, ctx=ctx)
    except _core._NotOkStatusException as e:
      msg = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, msg), None)
  # Graph mode: build the op and record a gradient entry.
  _, _, op = _op_def_lib._apply_op_helper("TanhGrad", y=y, dy=dy, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"))
  _execute.record_gradient("TanhGrad", op.inputs, attrs, outputs, name)
  result, = outputs
  return result
8589 
8590 
def tanh_grad_eager_fallback(y, dy, name=None, ctx=None):
  r"""Eager-mode slow path for tanh_grad.

  Invoked when the C fast path raises `_FallbackException`.
  """
  ctx = ctx if ctx else _context.context()
  # Coerce both inputs to a single matching dtype.
  attr_t, (y, dy) = _execute.args_to_matching_eager([y, dy], ctx)
  flat = [y, dy]
  attrs = ("T", attr_t)
  outputs = _execute.execute(b"TanhGrad", 1, inputs=flat, attrs=attrs,
                             ctx=ctx, name=name)
  _execute.record_gradient("TanhGrad", flat, attrs, outputs, name)
  result, = outputs
  return result
8606 
8607 
def truncate_div(x, y, name=None):
  r"""Returns x / y element-wise for integer types.

  Truncation designates that negative numbers will round fractional quantities
  toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different
  than Python semantics. See `FloorDiv` for a division function that matches
  Python Semantics.

  *NOTE*: `TruncateDiv` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`, `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    # Eager mode: C fast path, with Python fallback on _FallbackException.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          ctx._context_handle, ctx._eager_context.device_name, "TruncateDiv",
          name, ctx._post_execution_callbacks, x, y)
    except _core._FallbackException:
      return truncate_div_eager_fallback(x, y, name=name, ctx=ctx)
    except _core._NotOkStatusException as e:
      msg = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, msg), None)
  # Graph mode: build the op and record a gradient entry.
  _, _, op = _op_def_lib._apply_op_helper("TruncateDiv", x=x, y=y, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"))
  _execute.record_gradient("TruncateDiv", op.inputs, attrs, outputs, name)
  result, = outputs
  return result
8654 
8655 
def truncate_div_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for truncate_div.

  Invoked when the C fast path raises `_FallbackException`.
  """
  ctx = ctx if ctx else _context.context()
  attr_t, (x, y) = _execute.args_to_matching_eager([x, y], ctx)
  flat = [x, y]
  attrs = ("T", attr_t)
  outputs = _execute.execute(b"TruncateDiv", 1, inputs=flat, attrs=attrs,
                             ctx=ctx, name=name)
  _execute.record_gradient("TruncateDiv", flat, attrs, outputs, name)
  result, = outputs
  return result
8671 
8672 
def truncate_mod(x, y, name=None):
  r"""Returns element-wise remainder of division. This emulates C semantics in that

  the result here is consistent with a truncating divide. E.g. `truncate(x / y) *
  y + truncate_mod(x, y) = x`.

  *NOTE*: `TruncateMod` supports broadcasting. More about broadcasting
  [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)

  Args:
    x: A `Tensor`. Must be one of the following types: `int32`, `int64`, `bfloat16`, `half`, `float32`, `float64`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    # Eager mode: C fast path, with Python fallback on _FallbackException.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          ctx._context_handle, ctx._eager_context.device_name, "TruncateMod",
          name, ctx._post_execution_callbacks, x, y)
    except _core._FallbackException:
      return truncate_mod_eager_fallback(x, y, name=name, ctx=ctx)
    except _core._NotOkStatusException as e:
      msg = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, msg), None)
  # Graph mode: build the op and record a gradient entry.
  _, _, op = _op_def_lib._apply_op_helper("TruncateMod", x=x, y=y, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"))
  _execute.record_gradient("TruncateMod", op.inputs, attrs, outputs, name)
  result, = outputs
  return result
8717 
8718 
def truncate_mod_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for truncate_mod.

  Invoked when the C fast path raises `_FallbackException`.
  """
  ctx = ctx if ctx else _context.context()
  attr_t, (x, y) = _execute.args_to_matching_eager([x, y], ctx)
  flat = [x, y]
  attrs = ("T", attr_t)
  outputs = _execute.execute(b"TruncateMod", 1, inputs=flat, attrs=attrs,
                             ctx=ctx, name=name)
  _execute.record_gradient("TruncateMod", flat, attrs, outputs, name)
  result, = outputs
  return result
8734 
8735 
@tf_export('math.unsorted_segment_max', 'unsorted_segment_max')
@deprecated_endpoints('unsorted_segment_max')
def unsorted_segment_max(data, segment_ids, num_segments, name=None):
  r"""Computes the maximum along segments of a tensor.

  Read
  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
  for an explanation of segments.

  This operator is similar to the unsorted segment sum operator found
  [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
  Instead of computing the sum over segments, it computes the maximum such that:

  \\(output_i = \max_{j...} data[j...]\\) where max is over tuples `j...` such
  that `segment_ids[j...] == i`.

  If the maximum is empty for a given segment ID `i`, it outputs the smallest
  possible value for the specific numeric type,
  `output[i] = numeric_limits<T>::lowest()`.

  If the given segment ID `i` is negative, then the corresponding value is
  dropped, and will not be included in the result.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentMax.png" alt>
  </div>

  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
    segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor whose shape is a prefix of `data.shape`.
    num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
    Has same shape as data, except for the first `segment_ids.rank`
    dimensions, which are replaced with a single dimension which has size
    `num_segments`.
  """
  _ctx = _context._context
  if _ctx is None or not _ctx._eager_context.is_eager:
    # Graph mode: build the op and record a gradient entry.
    _, _, _op = _op_def_lib._apply_op_helper(
        "UnsortedSegmentMax", data=data, segment_ids=segment_ids,
        num_segments=num_segments, name=name)
    _result = _op.outputs[:]
    _inputs_flat = _op.inputs
    _attrs = ("T", _op.get_attr("T"), "Tindices", _op.get_attr("Tindices"),
              "Tnumsegments", _op.get_attr("Tnumsegments"))
    _execute.record_gradient(
      "UnsortedSegmentMax", _inputs_flat, _attrs, _result, name)
    _result, = _result
    return _result

  else:
    # Eager mode: try the C fast path; fall back to the Python slow path.
    try:
      _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
        _ctx._context_handle, _ctx._eager_context.device_name,
        "UnsortedSegmentMax", name, _ctx._post_execution_callbacks, data,
        segment_ids, num_segments)
      return _result
    except _core._FallbackException:
      return unsorted_segment_max_eager_fallback(
          data, segment_ids, num_segments, name=name, ctx=_ctx)
    except _core._NotOkStatusException as e:
      # Append the op name to the error message when one was supplied.
      if name is not None:
        message = e.message + " name: " + name
      else:
        message = e.message
      _six.raise_from(_core._status_to_exception(e.code, message), None)
8810 
8811 
def unsorted_segment_max_eager_fallback(data, segment_ids, num_segments, name=None, ctx=None):
  r"""Eager-mode slow path for unsorted_segment_max.

  Invoked when the C fast path raises `_FallbackException`.
  """
  ctx = ctx if ctx else _context.context()
  # Resolve the three dtype attributes; Tnumsegments defaults to int32.
  attr_t, (data,) = _execute.args_to_matching_eager([data], ctx)
  attr_tindices, (segment_ids,) = _execute.args_to_matching_eager(
      [segment_ids], ctx)
  attr_tnumsegments, (num_segments,) = _execute.args_to_matching_eager(
      [num_segments], ctx, _dtypes.int32)
  flat = [data, segment_ids, num_segments]
  attrs = ("T", attr_t, "Tindices", attr_tindices, "Tnumsegments",
           attr_tnumsegments)
  outputs = _execute.execute(b"UnsortedSegmentMax", 1, inputs=flat,
                             attrs=attrs, ctx=ctx, name=name)
  _execute.record_gradient("UnsortedSegmentMax", flat, attrs, outputs, name)
  result, = outputs
  return result
8829 
8830 
@tf_export('math.unsorted_segment_min', 'unsorted_segment_min')
@deprecated_endpoints('unsorted_segment_min')
def unsorted_segment_min(data, segment_ids, num_segments, name=None):
  r"""Computes the minimum along segments of a tensor.

  Read
  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#segmentation)
  for an explanation of segments.

  This operator is similar to the unsorted segment sum operator found
  [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
  Instead of computing the sum over segments, it computes the minimum such that:

  \\(output_i = \min_{j...} data_[j...]\\) where min is over tuples `j...` such
  that `segment_ids[j...] == i`.

  If the minimum is empty for a given segment ID `i`, it outputs the largest
  possible value for the specific numeric type,
  `output[i] = numeric_limits<T>::max()`.

  If the given segment ID `i` is negative, then the corresponding value is
  dropped, and will not be included in the result.

  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
    segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor whose shape is a prefix of `data.shape`.
    num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    # Eager mode: C fast path, with Python fallback on _FallbackException.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          ctx._context_handle, ctx._eager_context.device_name,
          "UnsortedSegmentMin", name, ctx._post_execution_callbacks, data,
          segment_ids, num_segments)
    except _core._FallbackException:
      return unsorted_segment_min_eager_fallback(
          data, segment_ids, num_segments, name=name, ctx=ctx)
    except _core._NotOkStatusException as e:
      msg = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, msg), None)
  # Graph mode: build the op and record a gradient entry.
  _, _, op = _op_def_lib._apply_op_helper(
      "UnsortedSegmentMin", data=data, segment_ids=segment_ids,
      num_segments=num_segments, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"), "Tindices", op.get_attr("Tindices"),
           "Tnumsegments", op.get_attr("Tnumsegments"))
  _execute.record_gradient("UnsortedSegmentMin", op.inputs, attrs, outputs,
                           name)
  result, = outputs
  return result
8894 
8895 
def unsorted_segment_min_eager_fallback(data, segment_ids, num_segments, name=None, ctx=None):
  r"""Eager-mode slow path for unsorted_segment_min.

  Invoked when the C fast path raises `_FallbackException`.
  """
  ctx = ctx if ctx else _context.context()
  # Resolve the three dtype attributes; Tnumsegments defaults to int32.
  attr_t, (data,) = _execute.args_to_matching_eager([data], ctx)
  attr_tindices, (segment_ids,) = _execute.args_to_matching_eager(
      [segment_ids], ctx)
  attr_tnumsegments, (num_segments,) = _execute.args_to_matching_eager(
      [num_segments], ctx, _dtypes.int32)
  flat = [data, segment_ids, num_segments]
  attrs = ("T", attr_t, "Tindices", attr_tindices, "Tnumsegments",
           attr_tnumsegments)
  outputs = _execute.execute(b"UnsortedSegmentMin", 1, inputs=flat,
                             attrs=attrs, ctx=ctx, name=name)
  _execute.record_gradient("UnsortedSegmentMin", flat, attrs, outputs, name)
  result, = outputs
  return result
8913 
8914 
@tf_export('math.unsorted_segment_prod', 'unsorted_segment_prod')
@deprecated_endpoints('unsorted_segment_prod')
def unsorted_segment_prod(data, segment_ids, num_segments, name=None):
  r"""Computes the product along segments of a tensor.

  Read
  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#segmentation)
  for an explanation of segments.

  This operator is similar to the unsorted segment sum operator found
  [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
  Instead of computing the sum over segments, it computes the product of all
  entries belonging to a segment such that:

  \\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples
  `j...` such that `segment_ids[j...] == i`.

  If there is no entry for a given segment ID `i`, it outputs 1.

  If the given segment ID `i` is negative, then the corresponding value is
  dropped, and will not be included in the result.

  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
    segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor whose shape is a prefix of `data.shape`.
    num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    # Eager mode: C fast path, with Python fallback on _FallbackException.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          ctx._context_handle, ctx._eager_context.device_name,
          "UnsortedSegmentProd", name, ctx._post_execution_callbacks, data,
          segment_ids, num_segments)
    except _core._FallbackException:
      return unsorted_segment_prod_eager_fallback(
          data, segment_ids, num_segments, name=name, ctx=ctx)
    except _core._NotOkStatusException as e:
      msg = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, msg), None)
  # Graph mode: build the op and record a gradient entry.
  _, _, op = _op_def_lib._apply_op_helper(
      "UnsortedSegmentProd", data=data, segment_ids=segment_ids,
      num_segments=num_segments, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"), "Tindices", op.get_attr("Tindices"),
           "Tnumsegments", op.get_attr("Tnumsegments"))
  _execute.record_gradient("UnsortedSegmentProd", op.inputs, attrs, outputs,
                           name)
  result, = outputs
  return result
8977 
8978 
def unsorted_segment_prod_eager_fallback(data, segment_ids, num_segments, name=None, ctx=None):
  r"""Eager-mode slow path for unsorted_segment_prod.

  Invoked when the C fast path raises `_FallbackException`.
  """
  ctx = ctx if ctx else _context.context()
  # Resolve the three dtype attributes; Tnumsegments defaults to int32.
  attr_t, (data,) = _execute.args_to_matching_eager([data], ctx)
  attr_tindices, (segment_ids,) = _execute.args_to_matching_eager(
      [segment_ids], ctx)
  attr_tnumsegments, (num_segments,) = _execute.args_to_matching_eager(
      [num_segments], ctx, _dtypes.int32)
  flat = [data, segment_ids, num_segments]
  attrs = ("T", attr_t, "Tindices", attr_tindices, "Tnumsegments",
           attr_tnumsegments)
  outputs = _execute.execute(b"UnsortedSegmentProd", 1, inputs=flat,
                             attrs=attrs, ctx=ctx, name=name)
  _execute.record_gradient("UnsortedSegmentProd", flat, attrs, outputs, name)
  result, = outputs
  return result
8996 
8997 
@tf_export('math.unsorted_segment_sum', 'unsorted_segment_sum')
@deprecated_endpoints('unsorted_segment_sum')
def unsorted_segment_sum(data, segment_ids, num_segments, name=None):
  r"""Computes the sum along segments of a tensor.

  Read
  [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
  for an explanation of segments.

  Computes a tensor such that
  \\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such
  that `segment_ids[j...] == i`.  Unlike `SegmentSum`, `segment_ids`
  need not be sorted and need not cover all values in the full
  range of valid values.

  If the sum is empty for a given segment ID `i`, `output[i] = 0`.
  If the given segment ID `i` is negative, the value is dropped and will not be
  added to the sum of the segment.

  `num_segments` should equal the number of distinct segment IDs.

  <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
  <img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentSum.png" alt>
  </div>

  Args:
    data: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
    segment_ids: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      A tensor whose shape is a prefix of `data.shape`.
    num_segments: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `data`.
  """
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    # Eager mode: C fast path, with Python fallback on _FallbackException.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          ctx._context_handle, ctx._eager_context.device_name,
          "UnsortedSegmentSum", name, ctx._post_execution_callbacks, data,
          segment_ids, num_segments)
    except _core._FallbackException:
      return unsorted_segment_sum_eager_fallback(
          data, segment_ids, num_segments, name=name, ctx=ctx)
    except _core._NotOkStatusException as e:
      msg = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, msg), None)
  # Graph mode: build the op and record a gradient entry.
  _, _, op = _op_def_lib._apply_op_helper(
      "UnsortedSegmentSum", data=data, segment_ids=segment_ids,
      num_segments=num_segments, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"), "Tindices", op.get_attr("Tindices"),
           "Tnumsegments", op.get_attr("Tnumsegments"))
  _execute.record_gradient("UnsortedSegmentSum", op.inputs, attrs, outputs,
                           name)
  result, = outputs
  return result
9063 
9064 
def unsorted_segment_sum_eager_fallback(data, segment_ids, num_segments, name=None, ctx=None):
  r"""Eager-mode slow path for unsorted_segment_sum.

  Invoked when the C fast path raises `_FallbackException`.
  """
  ctx = ctx if ctx else _context.context()
  # Resolve the three dtype attributes; Tnumsegments defaults to int32.
  attr_t, (data,) = _execute.args_to_matching_eager([data], ctx)
  attr_tindices, (segment_ids,) = _execute.args_to_matching_eager(
      [segment_ids], ctx)
  attr_tnumsegments, (num_segments,) = _execute.args_to_matching_eager(
      [num_segments], ctx, _dtypes.int32)
  flat = [data, segment_ids, num_segments]
  attrs = ("T", attr_t, "Tindices", attr_tindices, "Tnumsegments",
           attr_tnumsegments)
  outputs = _execute.execute(b"UnsortedSegmentSum", 1, inputs=flat,
                             attrs=attrs, ctx=ctx, name=name)
  _execute.record_gradient("UnsortedSegmentSum", flat, attrs, outputs, name)
  result, = outputs
  return result
9082 
9083 
@tf_export('math.xdivy')
def xdivy(x, y, name=None):
  r"""Returns 0 if x == 0, and x / y otherwise, elementwise.

  Args:
    x: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    # Eager mode: C fast path, with Python fallback on _FallbackException.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          ctx._context_handle, ctx._eager_context.device_name, "Xdivy", name,
          ctx._post_execution_callbacks, x, y)
    except _core._FallbackException:
      return xdivy_eager_fallback(x, y, name=name, ctx=ctx)
    except _core._NotOkStatusException as e:
      msg = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, msg), None)
  # Graph mode: build the op and record a gradient entry.
  _, _, op = _op_def_lib._apply_op_helper("Xdivy", x=x, y=y, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"))
  _execute.record_gradient("Xdivy", op.inputs, attrs, outputs, name)
  result, = outputs
  return result
9123 
9124 
def xdivy_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for xdivy.

  Invoked when the C fast path raises `_FallbackException`.
  """
  ctx = ctx if ctx else _context.context()
  attr_t, (x, y) = _execute.args_to_matching_eager([x, y], ctx)
  flat = [x, y]
  attrs = ("T", attr_t)
  outputs = _execute.execute(b"Xdivy", 1, inputs=flat, attrs=attrs, ctx=ctx,
                             name=name)
  _execute.record_gradient("Xdivy", flat, attrs, outputs, name)
  result, = outputs
  return result
9140 
9141 
@tf_export('math.xlogy')
def xlogy(x, y, name=None):
  r"""Returns 0 if x == 0, and x * log(y) otherwise, elementwise.

  Args:
    x: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    # Eager mode: C fast path, with Python fallback on _FallbackException.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          ctx._context_handle, ctx._eager_context.device_name, "Xlogy", name,
          ctx._post_execution_callbacks, x, y)
    except _core._FallbackException:
      return xlogy_eager_fallback(x, y, name=name, ctx=ctx)
    except _core._NotOkStatusException as e:
      msg = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, msg), None)
  # Graph mode: build the op and record a gradient entry.
  _, _, op = _op_def_lib._apply_op_helper("Xlogy", x=x, y=y, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"))
  _execute.record_gradient("Xlogy", op.inputs, attrs, outputs, name)
  result, = outputs
  return result
9181 
9182 
def xlogy_eager_fallback(x, y, name=None, ctx=None):
  r"""Eager-mode slow path for xlogy.

  Invoked when the C fast path raises `_FallbackException`.
  """
  ctx = ctx if ctx else _context.context()
  attr_t, (x, y) = _execute.args_to_matching_eager([x, y], ctx)
  flat = [x, y]
  attrs = ("T", attr_t)
  outputs = _execute.execute(b"Xlogy", 1, inputs=flat, attrs=attrs, ctx=ctx,
                             name=name)
  _execute.record_gradient("Xlogy", flat, attrs, outputs, name)
  result, = outputs
  return result
9198 
9199 
@tf_export('math.zeta', 'zeta')
@deprecated_endpoints('zeta')
def zeta(x, q, name=None):
  r"""Compute the Hurwitz zeta function \\(\zeta(x, q)\\).

  The Hurwitz zeta function is defined as:


  \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\)

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    q: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  ctx = _context._context
  if ctx is not None and ctx._eager_context.is_eager:
    # Eager mode: C fast path, with Python fallback on _FallbackException.
    try:
      return _pywrap_tensorflow.TFE_Py_FastPathExecute(
          ctx._context_handle, ctx._eager_context.device_name, "Zeta", name,
          ctx._post_execution_callbacks, x, q)
    except _core._FallbackException:
      return zeta_eager_fallback(x, q, name=name, ctx=ctx)
    except _core._NotOkStatusException as e:
      msg = e.message if name is None else e.message + " name: " + name
      _six.raise_from(_core._status_to_exception(e.code, msg), None)
  # Graph mode: build the op and record a gradient entry.
  _, _, op = _op_def_lib._apply_op_helper("Zeta", x=x, q=q, name=name)
  outputs = op.outputs[:]
  attrs = ("T", op.get_attr("T"))
  _execute.record_gradient("Zeta", op.inputs, attrs, outputs, name)
  result, = outputs
  return result
9245 
9246 
def zeta_eager_fallback(x, q, name=None, ctx=None):
  r"""This is the slowpath function for Eager mode.
  This is for function zeta

  Executes the Zeta op via the generic eager execute path; used when the
  C fast path raises a fallback exception.
  """
  eager_ctx = ctx or _context.context()
  # Promote x and q to a single matching dtype for the "T" attr.
  attr_t, matched_inputs = _execute.args_to_matching_eager([x, q], eager_ctx)
  x, q = matched_inputs
  flat_inputs = [x, q]
  op_attrs = ("T", attr_t)
  results = _execute.execute(b"Zeta", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=eager_ctx, name=name)
  _execute.record_gradient(
      "Zeta", flat_inputs, op_attrs, results, name)
  output, = results
  return output
9262 
def _InitOpDefLibrary(op_list_proto_bytes):
  """Parse a serialized OpList, register its ops, and return an OpDefLibrary.

  Args:
    op_list_proto_bytes: Serialized `OpList` protocol buffer bytes.

  Returns:
    An `OpDefLibrary` populated with every op in the parsed list.
  """
  parsed_ops = _op_def_pb2.OpList()
  parsed_ops.ParseFromString(op_list_proto_bytes)
  # Registration makes the op definitions visible to the framework globally.
  _op_def_registry.register_op_list(parsed_ops)
  library = _op_def_library.OpDefLibrary()
  library.add_op_list(parsed_ops)
  return library
9270 # op {
9271 #   name: "Abs"
9272 #   input_arg {
9273 #     name: "x"
9274 #     type_attr: "T"
9275 #   }
9276 #   output_arg {
9277 #     name: "y"
9278 #     type_attr: "T"
9279 #   }
9280 #   attr {
9281 #     name: "T"
9282 #     type: "type"
9283 #     allowed_values {
9284 #       list {
9285 #         type: DT_BFLOAT16
9286 #         type: DT_HALF
9287 #         type: DT_FLOAT
9288 #         type: DT_DOUBLE
9289 #         type: DT_INT32
9290 #         type: DT_INT64
9291 #       }
9292 #     }
9293 #   }
9294 # }
9295 # op {
9296 #   name: "AccumulateNV2"
9297 #   input_arg {
9298 #     name: "inputs"
9299 #     type_attr: "T"
9300 #     number_attr: "N"
9301 #   }
9302 #   output_arg {
9303 #     name: "sum"
9304 #     type_attr: "T"
9305 #   }
9306 #   attr {
9307 #     name: "N"
9308 #     type: "int"
9309 #     has_minimum: true
9310 #     minimum: 1
9311 #   }
9312 #   attr {
9313 #     name: "T"
9314 #     type: "type"
9315 #     allowed_values {
9316 #       list {
9317 #         type: DT_FLOAT
9318 #         type: DT_DOUBLE
9319 #         type: DT_INT32
9320 #         type: DT_UINT8
9321 #         type: DT_INT16
9322 #         type: DT_INT8
9323 #         type: DT_COMPLEX64
9324 #         type: DT_INT64
9325 #         type: DT_QINT8
9326 #         type: DT_QUINT8
9327 #         type: DT_QINT32
9328 #         type: DT_BFLOAT16
9329 #         type: DT_UINT16
9330 #         type: DT_COMPLEX128
9331 #         type: DT_HALF
9332 #         type: DT_UINT32
9333 #         type: DT_UINT64
9334 #       }
9335 #     }
9336 #   }
9337 #   attr {
9338 #     name: "shape"
9339 #     type: "shape"
9340 #   }
9341 #   is_aggregate: true
9342 #   is_commutative: true
9343 # }
9344 # op {
9345 #   name: "Acos"
9346 #   input_arg {
9347 #     name: "x"
9348 #     type_attr: "T"
9349 #   }
9350 #   output_arg {
9351 #     name: "y"
9352 #     type_attr: "T"
9353 #   }
9354 #   attr {
9355 #     name: "T"
9356 #     type: "type"
9357 #     allowed_values {
9358 #       list {
9359 #         type: DT_BFLOAT16
9360 #         type: DT_HALF
9361 #         type: DT_FLOAT
9362 #         type: DT_DOUBLE
9363 #         type: DT_INT32
9364 #         type: DT_INT64
9365 #         type: DT_COMPLEX64
9366 #         type: DT_COMPLEX128
9367 #       }
9368 #     }
9369 #   }
9370 # }
9371 # op {
9372 #   name: "Acosh"
9373 #   input_arg {
9374 #     name: "x"
9375 #     type_attr: "T"
9376 #   }
9377 #   output_arg {
9378 #     name: "y"
9379 #     type_attr: "T"
9380 #   }
9381 #   attr {
9382 #     name: "T"
9383 #     type: "type"
9384 #     allowed_values {
9385 #       list {
9386 #         type: DT_BFLOAT16
9387 #         type: DT_HALF
9388 #         type: DT_FLOAT
9389 #         type: DT_DOUBLE
9390 #         type: DT_COMPLEX64
9391 #         type: DT_COMPLEX128
9392 #       }
9393 #     }
9394 #   }
9395 # }
9396 # op {
9397 #   name: "Add"
9398 #   input_arg {
9399 #     name: "x"
9400 #     type_attr: "T"
9401 #   }
9402 #   input_arg {
9403 #     name: "y"
9404 #     type_attr: "T"
9405 #   }
9406 #   output_arg {
9407 #     name: "z"
9408 #     type_attr: "T"
9409 #   }
9410 #   attr {
9411 #     name: "T"
9412 #     type: "type"
9413 #     allowed_values {
9414 #       list {
9415 #         type: DT_BFLOAT16
9416 #         type: DT_HALF
9417 #         type: DT_FLOAT
9418 #         type: DT_DOUBLE
9419 #         type: DT_UINT8
9420 #         type: DT_INT8
9421 #         type: DT_INT16
9422 #         type: DT_INT32
9423 #         type: DT_INT64
9424 #         type: DT_COMPLEX64
9425 #         type: DT_COMPLEX128
9426 #         type: DT_STRING
9427 #       }
9428 #     }
9429 #   }
9430 # }
9431 # op {
9432 #   name: "AddN"
9433 #   input_arg {
9434 #     name: "inputs"
9435 #     type_attr: "T"
9436 #     number_attr: "N"
9437 #   }
9438 #   output_arg {
9439 #     name: "sum"
9440 #     type_attr: "T"
9441 #   }
9442 #   attr {
9443 #     name: "N"
9444 #     type: "int"
9445 #     has_minimum: true
9446 #     minimum: 1
9447 #   }
9448 #   attr {
9449 #     name: "T"
9450 #     type: "type"
9451 #     allowed_values {
9452 #       list {
9453 #         type: DT_FLOAT
9454 #         type: DT_DOUBLE
9455 #         type: DT_INT32
9456 #         type: DT_UINT8
9457 #         type: DT_INT16
9458 #         type: DT_INT8
9459 #         type: DT_COMPLEX64
9460 #         type: DT_INT64
9461 #         type: DT_QINT8
9462 #         type: DT_QUINT8
9463 #         type: DT_QINT32
9464 #         type: DT_BFLOAT16
9465 #         type: DT_UINT16
9466 #         type: DT_COMPLEX128
9467 #         type: DT_HALF
9468 #         type: DT_UINT32
9469 #         type: DT_UINT64
9470 #         type: DT_VARIANT
9471 #       }
9472 #     }
9473 #   }
9474 #   is_aggregate: true
9475 #   is_commutative: true
9476 # }
9477 # op {
9478 #   name: "AddV2"
9479 #   input_arg {
9480 #     name: "x"
9481 #     type_attr: "T"
9482 #   }
9483 #   input_arg {
9484 #     name: "y"
9485 #     type_attr: "T"
9486 #   }
9487 #   output_arg {
9488 #     name: "z"
9489 #     type_attr: "T"
9490 #   }
9491 #   attr {
9492 #     name: "T"
9493 #     type: "type"
9494 #     allowed_values {
9495 #       list {
9496 #         type: DT_BFLOAT16
9497 #         type: DT_HALF
9498 #         type: DT_FLOAT
9499 #         type: DT_DOUBLE
9500 #         type: DT_UINT8
9501 #         type: DT_INT8
9502 #         type: DT_INT16
9503 #         type: DT_INT32
9504 #         type: DT_INT64
9505 #         type: DT_COMPLEX64
9506 #         type: DT_COMPLEX128
9507 #       }
9508 #     }
9509 #   }
9510 #   is_aggregate: true
9511 #   is_commutative: true
9512 # }
9513 # op {
9514 #   name: "All"
9515 #   input_arg {
9516 #     name: "input"
9517 #     type: DT_BOOL
9518 #   }
9519 #   input_arg {
9520 #     name: "reduction_indices"
9521 #     type_attr: "Tidx"
9522 #   }
9523 #   output_arg {
9524 #     name: "output"
9525 #     type: DT_BOOL
9526 #   }
9527 #   attr {
9528 #     name: "keep_dims"
9529 #     type: "bool"
9530 #     default_value {
9531 #       b: false
9532 #     }
9533 #   }
9534 #   attr {
9535 #     name: "Tidx"
9536 #     type: "type"
9537 #     default_value {
9538 #       type: DT_INT32
9539 #     }
9540 #     allowed_values {
9541 #       list {
9542 #         type: DT_INT32
9543 #         type: DT_INT64
9544 #       }
9545 #     }
9546 #   }
9547 # }
9548 # op {
9549 #   name: "Angle"
9550 #   input_arg {
9551 #     name: "input"
9552 #     type_attr: "T"
9553 #   }
9554 #   output_arg {
9555 #     name: "output"
9556 #     type_attr: "Tout"
9557 #   }
9558 #   attr {
9559 #     name: "T"
9560 #     type: "type"
9561 #     default_value {
9562 #       type: DT_COMPLEX64
9563 #     }
9564 #     allowed_values {
9565 #       list {
9566 #         type: DT_COMPLEX64
9567 #         type: DT_COMPLEX128
9568 #       }
9569 #     }
9570 #   }
9571 #   attr {
9572 #     name: "Tout"
9573 #     type: "type"
9574 #     default_value {
9575 #       type: DT_FLOAT
9576 #     }
9577 #     allowed_values {
9578 #       list {
9579 #         type: DT_FLOAT
9580 #         type: DT_DOUBLE
9581 #       }
9582 #     }
9583 #   }
9584 # }
9585 # op {
9586 #   name: "Any"
9587 #   input_arg {
9588 #     name: "input"
9589 #     type: DT_BOOL
9590 #   }
9591 #   input_arg {
9592 #     name: "reduction_indices"
9593 #     type_attr: "Tidx"
9594 #   }
9595 #   output_arg {
9596 #     name: "output"
9597 #     type: DT_BOOL
9598 #   }
9599 #   attr {
9600 #     name: "keep_dims"
9601 #     type: "bool"
9602 #     default_value {
9603 #       b: false
9604 #     }
9605 #   }
9606 #   attr {
9607 #     name: "Tidx"
9608 #     type: "type"
9609 #     default_value {
9610 #       type: DT_INT32
9611 #     }
9612 #     allowed_values {
9613 #       list {
9614 #         type: DT_INT32
9615 #         type: DT_INT64
9616 #       }
9617 #     }
9618 #   }
9619 # }
9620 # op {
9621 #   name: "ApproximateEqual"
9622 #   input_arg {
9623 #     name: "x"
9624 #     type_attr: "T"
9625 #   }
9626 #   input_arg {
9627 #     name: "y"
9628 #     type_attr: "T"
9629 #   }
9630 #   output_arg {
9631 #     name: "z"
9632 #     type: DT_BOOL
9633 #   }
9634 #   attr {
9635 #     name: "T"
9636 #     type: "type"
9637 #     allowed_values {
9638 #       list {
9639 #         type: DT_FLOAT
9640 #         type: DT_DOUBLE
9641 #         type: DT_INT32
9642 #         type: DT_UINT8
9643 #         type: DT_INT16
9644 #         type: DT_INT8
9645 #         type: DT_COMPLEX64
9646 #         type: DT_INT64
9647 #         type: DT_QINT8
9648 #         type: DT_QUINT8
9649 #         type: DT_QINT32
9650 #         type: DT_BFLOAT16
9651 #         type: DT_UINT16
9652 #         type: DT_COMPLEX128
9653 #         type: DT_HALF
9654 #         type: DT_UINT32
9655 #         type: DT_UINT64
9656 #       }
9657 #     }
9658 #   }
9659 #   attr {
9660 #     name: "tolerance"
9661 #     type: "float"
9662 #     default_value {
9663 #       f: 1e-05
9664 #     }
9665 #   }
9666 #   is_commutative: true
9667 # }
9668 # op {
9669 #   name: "ArgMax"
9670 #   input_arg {
9671 #     name: "input"
9672 #     type_attr: "T"
9673 #   }
9674 #   input_arg {
9675 #     name: "dimension"
9676 #     type_attr: "Tidx"
9677 #   }
9678 #   output_arg {
9679 #     name: "output"
9680 #     type_attr: "output_type"
9681 #   }
9682 #   attr {
9683 #     name: "T"
9684 #     type: "type"
9685 #     allowed_values {
9686 #       list {
9687 #         type: DT_FLOAT
9688 #         type: DT_DOUBLE
9689 #         type: DT_INT32
9690 #         type: DT_UINT8
9691 #         type: DT_INT16
9692 #         type: DT_INT8
9693 #         type: DT_COMPLEX64
9694 #         type: DT_INT64
9695 #         type: DT_QINT8
9696 #         type: DT_QUINT8
9697 #         type: DT_QINT32
9698 #         type: DT_BFLOAT16
9699 #         type: DT_UINT16
9700 #         type: DT_COMPLEX128
9701 #         type: DT_HALF
9702 #         type: DT_UINT32
9703 #         type: DT_UINT64
9704 #       }
9705 #     }
9706 #   }
9707 #   attr {
9708 #     name: "Tidx"
9709 #     type: "type"
9710 #     default_value {
9711 #       type: DT_INT32
9712 #     }
9713 #     allowed_values {
9714 #       list {
9715 #         type: DT_INT32
9716 #         type: DT_INT64
9717 #       }
9718 #     }
9719 #   }
9720 #   attr {
9721 #     name: "output_type"
9722 #     type: "type"
9723 #     default_value {
9724 #       type: DT_INT64
9725 #     }
9726 #     allowed_values {
9727 #       list {
9728 #         type: DT_INT32
9729 #         type: DT_INT64
9730 #       }
9731 #     }
9732 #   }
9733 # }
9734 # op {
9735 #   name: "ArgMin"
9736 #   input_arg {
9737 #     name: "input"
9738 #     type_attr: "T"
9739 #   }
9740 #   input_arg {
9741 #     name: "dimension"
9742 #     type_attr: "Tidx"
9743 #   }
9744 #   output_arg {
9745 #     name: "output"
9746 #     type_attr: "output_type"
9747 #   }
9748 #   attr {
9749 #     name: "T"
9750 #     type: "type"
9751 #     allowed_values {
9752 #       list {
9753 #         type: DT_FLOAT
9754 #         type: DT_DOUBLE
9755 #         type: DT_INT32
9756 #         type: DT_UINT8
9757 #         type: DT_INT16
9758 #         type: DT_INT8
9759 #         type: DT_COMPLEX64
9760 #         type: DT_INT64
9761 #         type: DT_QINT8
9762 #         type: DT_QUINT8
9763 #         type: DT_QINT32
9764 #         type: DT_BFLOAT16
9765 #         type: DT_UINT16
9766 #         type: DT_COMPLEX128
9767 #         type: DT_HALF
9768 #         type: DT_UINT32
9769 #         type: DT_UINT64
9770 #       }
9771 #     }
9772 #   }
9773 #   attr {
9774 #     name: "Tidx"
9775 #     type: "type"
9776 #     default_value {
9777 #       type: DT_INT32
9778 #     }
9779 #     allowed_values {
9780 #       list {
9781 #         type: DT_INT32
9782 #         type: DT_INT64
9783 #       }
9784 #     }
9785 #   }
9786 #   attr {
9787 #     name: "output_type"
9788 #     type: "type"
9789 #     default_value {
9790 #       type: DT_INT64
9791 #     }
9792 #     allowed_values {
9793 #       list {
9794 #         type: DT_INT32
9795 #         type: DT_INT64
9796 #       }
9797 #     }
9798 #   }
9799 # }
9800 # op {
9801 #   name: "Asin"
9802 #   input_arg {
9803 #     name: "x"
9804 #     type_attr: "T"
9805 #   }
9806 #   output_arg {
9807 #     name: "y"
9808 #     type_attr: "T"
9809 #   }
9810 #   attr {
9811 #     name: "T"
9812 #     type: "type"
9813 #     allowed_values {
9814 #       list {
9815 #         type: DT_BFLOAT16
9816 #         type: DT_HALF
9817 #         type: DT_FLOAT
9818 #         type: DT_DOUBLE
9819 #         type: DT_INT32
9820 #         type: DT_INT64
9821 #         type: DT_COMPLEX64
9822 #         type: DT_COMPLEX128
9823 #       }
9824 #     }
9825 #   }
9826 # }
9827 # op {
9828 #   name: "Asinh"
9829 #   input_arg {
9830 #     name: "x"
9831 #     type_attr: "T"
9832 #   }
9833 #   output_arg {
9834 #     name: "y"
9835 #     type_attr: "T"
9836 #   }
9837 #   attr {
9838 #     name: "T"
9839 #     type: "type"
9840 #     allowed_values {
9841 #       list {
9842 #         type: DT_BFLOAT16
9843 #         type: DT_HALF
9844 #         type: DT_FLOAT
9845 #         type: DT_DOUBLE
9846 #         type: DT_COMPLEX64
9847 #         type: DT_COMPLEX128
9848 #       }
9849 #     }
9850 #   }
9851 # }
9852 # op {
9853 #   name: "Atan"
9854 #   input_arg {
9855 #     name: "x"
9856 #     type_attr: "T"
9857 #   }
9858 #   output_arg {
9859 #     name: "y"
9860 #     type_attr: "T"
9861 #   }
9862 #   attr {
9863 #     name: "T"
9864 #     type: "type"
9865 #     allowed_values {
9866 #       list {
9867 #         type: DT_BFLOAT16
9868 #         type: DT_HALF
9869 #         type: DT_FLOAT
9870 #         type: DT_DOUBLE
9871 #         type: DT_INT32
9872 #         type: DT_INT64
9873 #         type: DT_COMPLEX64
9874 #         type: DT_COMPLEX128
9875 #       }
9876 #     }
9877 #   }
9878 # }
9879 # op {
9880 #   name: "Atan2"
9881 #   input_arg {
9882 #     name: "y"
9883 #     type_attr: "T"
9884 #   }
9885 #   input_arg {
9886 #     name: "x"
9887 #     type_attr: "T"
9888 #   }
9889 #   output_arg {
9890 #     name: "z"
9891 #     type_attr: "T"
9892 #   }
9893 #   attr {
9894 #     name: "T"
9895 #     type: "type"
9896 #     allowed_values {
9897 #       list {
9898 #         type: DT_BFLOAT16
9899 #         type: DT_HALF
9900 #         type: DT_FLOAT
9901 #         type: DT_DOUBLE
9902 #       }
9903 #     }
9904 #   }
9905 # }
9906 # op {
9907 #   name: "Atanh"
9908 #   input_arg {
9909 #     name: "x"
9910 #     type_attr: "T"
9911 #   }
9912 #   output_arg {
9913 #     name: "y"
9914 #     type_attr: "T"
9915 #   }
9916 #   attr {
9917 #     name: "T"
9918 #     type: "type"
9919 #     allowed_values {
9920 #       list {
9921 #         type: DT_BFLOAT16
9922 #         type: DT_HALF
9923 #         type: DT_FLOAT
9924 #         type: DT_DOUBLE
9925 #         type: DT_COMPLEX64
9926 #         type: DT_COMPLEX128
9927 #       }
9928 #     }
9929 #   }
9930 # }
9931 # op {
9932 #   name: "BatchMatMul"
9933 #   input_arg {
9934 #     name: "x"
9935 #     type_attr: "T"
9936 #   }
9937 #   input_arg {
9938 #     name: "y"
9939 #     type_attr: "T"
9940 #   }
9941 #   output_arg {
9942 #     name: "output"
9943 #     type_attr: "T"
9944 #   }
9945 #   attr {
9946 #     name: "T"
9947 #     type: "type"
9948 #     allowed_values {
9949 #       list {
9950 #         type: DT_BFLOAT16
9951 #         type: DT_HALF
9952 #         type: DT_FLOAT
9953 #         type: DT_DOUBLE
9954 #         type: DT_INT32
9955 #         type: DT_COMPLEX64
9956 #         type: DT_COMPLEX128
9957 #       }
9958 #     }
9959 #   }
9960 #   attr {
9961 #     name: "adj_x"
9962 #     type: "bool"
9963 #     default_value {
9964 #       b: false
9965 #     }
9966 #   }
9967 #   attr {
9968 #     name: "adj_y"
9969 #     type: "bool"
9970 #     default_value {
9971 #       b: false
9972 #     }
9973 #   }
9974 # }
9975 # op {
9976 #   name: "BesselI0e"
9977 #   input_arg {
9978 #     name: "x"
9979 #     type_attr: "T"
9980 #   }
9981 #   output_arg {
9982 #     name: "y"
9983 #     type_attr: "T"
9984 #   }
9985 #   attr {
9986 #     name: "T"
9987 #     type: "type"
9988 #     allowed_values {
9989 #       list {
9990 #         type: DT_BFLOAT16
9991 #         type: DT_HALF
9992 #         type: DT_FLOAT
9993 #         type: DT_DOUBLE
9994 #       }
9995 #     }
9996 #   }
9997 # }
9998 # op {
9999 #   name: "BesselI1e"
10000 #   input_arg {
10001 #     name: "x"
10002 #     type_attr: "T"
10003 #   }
10004 #   output_arg {
10005 #     name: "y"
10006 #     type_attr: "T"
10007 #   }
10008 #   attr {
10009 #     name: "T"
10010 #     type: "type"
10011 #     allowed_values {
10012 #       list {
10013 #         type: DT_BFLOAT16
10014 #         type: DT_HALF
10015 #         type: DT_FLOAT
10016 #         type: DT_DOUBLE
10017 #       }
10018 #     }
10019 #   }
10020 # }
10021 # op {
10022 #   name: "Betainc"
10023 #   input_arg {
10024 #     name: "a"
10025 #     type_attr: "T"
10026 #   }
10027 #   input_arg {
10028 #     name: "b"
10029 #     type_attr: "T"
10030 #   }
10031 #   input_arg {
10032 #     name: "x"
10033 #     type_attr: "T"
10034 #   }
10035 #   output_arg {
10036 #     name: "z"
10037 #     type_attr: "T"
10038 #   }
10039 #   attr {
10040 #     name: "T"
10041 #     type: "type"
10042 #     allowed_values {
10043 #       list {
10044 #         type: DT_FLOAT
10045 #         type: DT_DOUBLE
10046 #       }
10047 #     }
10048 #   }
10049 # }
10050 # op {
10051 #   name: "Bincount"
10052 #   input_arg {
10053 #     name: "arr"
10054 #     type: DT_INT32
10055 #   }
10056 #   input_arg {
10057 #     name: "size"
10058 #     type: DT_INT32
10059 #   }
10060 #   input_arg {
10061 #     name: "weights"
10062 #     type_attr: "T"
10063 #   }
10064 #   output_arg {
10065 #     name: "bins"
10066 #     type_attr: "T"
10067 #   }
10068 #   attr {
10069 #     name: "T"
10070 #     type: "type"
10071 #     allowed_values {
10072 #       list {
10073 #         type: DT_INT32
10074 #         type: DT_INT64
10075 #         type: DT_FLOAT
10076 #         type: DT_DOUBLE
10077 #       }
10078 #     }
10079 #   }
10080 # }
10081 # op {
10082 #   name: "Bucketize"
10083 #   input_arg {
10084 #     name: "input"
10085 #     type_attr: "T"
10086 #   }
10087 #   output_arg {
10088 #     name: "output"
10089 #     type: DT_INT32
10090 #   }
10091 #   attr {
10092 #     name: "T"
10093 #     type: "type"
10094 #     allowed_values {
10095 #       list {
10096 #         type: DT_INT32
10097 #         type: DT_INT64
10098 #         type: DT_FLOAT
10099 #         type: DT_DOUBLE
10100 #       }
10101 #     }
10102 #   }
10103 #   attr {
10104 #     name: "boundaries"
10105 #     type: "list(float)"
10106 #   }
10107 # }
10108 # op {
10109 #   name: "Cast"
10110 #   input_arg {
10111 #     name: "x"
10112 #     type_attr: "SrcT"
10113 #   }
10114 #   output_arg {
10115 #     name: "y"
10116 #     type_attr: "DstT"
10117 #   }
10118 #   attr {
10119 #     name: "SrcT"
10120 #     type: "type"
10121 #   }
10122 #   attr {
10123 #     name: "DstT"
10124 #     type: "type"
10125 #   }
10126 #   attr {
10127 #     name: "Truncate"
10128 #     type: "bool"
10129 #     default_value {
10130 #       b: false
10131 #     }
10132 #   }
10133 # }
10134 # op {
10135 #   name: "Ceil"
10136 #   input_arg {
10137 #     name: "x"
10138 #     type_attr: "T"
10139 #   }
10140 #   output_arg {
10141 #     name: "y"
10142 #     type_attr: "T"
10143 #   }
10144 #   attr {
10145 #     name: "T"
10146 #     type: "type"
10147 #     allowed_values {
10148 #       list {
10149 #         type: DT_BFLOAT16
10150 #         type: DT_HALF
10151 #         type: DT_FLOAT
10152 #         type: DT_DOUBLE
10153 #       }
10154 #     }
10155 #   }
10156 # }
10157 # op {
10158 #   name: "ClipByValue"
10159 #   input_arg {
10160 #     name: "t"
10161 #     type_attr: "T"
10162 #   }
10163 #   input_arg {
10164 #     name: "clip_value_min"
10165 #     type_attr: "T"
10166 #   }
10167 #   input_arg {
10168 #     name: "clip_value_max"
10169 #     type_attr: "T"
10170 #   }
10171 #   output_arg {
10172 #     name: "output"
10173 #     type_attr: "T"
10174 #   }
10175 #   attr {
10176 #     name: "T"
10177 #     type: "type"
10178 #     allowed_values {
10179 #       list {
10180 #         type: DT_FLOAT
10181 #         type: DT_DOUBLE
10182 #         type: DT_INT32
10183 #         type: DT_UINT8
10184 #         type: DT_INT16
10185 #         type: DT_INT8
10186 #         type: DT_COMPLEX64
10187 #         type: DT_INT64
10188 #         type: DT_QINT8
10189 #         type: DT_QUINT8
10190 #         type: DT_QINT32
10191 #         type: DT_BFLOAT16
10192 #         type: DT_UINT16
10193 #         type: DT_COMPLEX128
10194 #         type: DT_HALF
10195 #         type: DT_UINT32
10196 #         type: DT_UINT64
10197 #       }
10198 #     }
10199 #   }
10200 # }
10201 # op {
10202 #   name: "CompareAndBitpack"
10203 #   input_arg {
10204 #     name: "input"
10205 #     type_attr: "T"
10206 #   }
10207 #   input_arg {
10208 #     name: "threshold"
10209 #     type_attr: "T"
10210 #   }
10211 #   output_arg {
10212 #     name: "output"
10213 #     type: DT_UINT8
10214 #   }
10215 #   attr {
10216 #     name: "T"
10217 #     type: "type"
10218 #     allowed_values {
10219 #       list {
10220 #         type: DT_BOOL
10221 #         type: DT_HALF
10222 #         type: DT_FLOAT
10223 #         type: DT_DOUBLE
10224 #         type: DT_INT8
10225 #         type: DT_INT16
10226 #         type: DT_INT32
10227 #         type: DT_INT64
10228 #       }
10229 #     }
10230 #   }
10231 # }
10232 # op {
10233 #   name: "Complex"
10234 #   input_arg {
10235 #     name: "real"
10236 #     type_attr: "T"
10237 #   }
10238 #   input_arg {
10239 #     name: "imag"
10240 #     type_attr: "T"
10241 #   }
10242 #   output_arg {
10243 #     name: "out"
10244 #     type_attr: "Tout"
10245 #   }
10246 #   attr {
10247 #     name: "T"
10248 #     type: "type"
10249 #     default_value {
10250 #       type: DT_FLOAT
10251 #     }
10252 #     allowed_values {
10253 #       list {
10254 #         type: DT_FLOAT
10255 #         type: DT_DOUBLE
10256 #       }
10257 #     }
10258 #   }
10259 #   attr {
10260 #     name: "Tout"
10261 #     type: "type"
10262 #     default_value {
10263 #       type: DT_COMPLEX64
10264 #     }
10265 #     allowed_values {
10266 #       list {
10267 #         type: DT_COMPLEX64
10268 #         type: DT_COMPLEX128
10269 #       }
10270 #     }
10271 #   }
10272 # }
10273 # op {
10274 #   name: "ComplexAbs"
10275 #   input_arg {
10276 #     name: "x"
10277 #     type_attr: "T"
10278 #   }
10279 #   output_arg {
10280 #     name: "y"
10281 #     type_attr: "Tout"
10282 #   }
10283 #   attr {
10284 #     name: "T"
10285 #     type: "type"
10286 #     default_value {
10287 #       type: DT_COMPLEX64
10288 #     }
10289 #     allowed_values {
10290 #       list {
10291 #         type: DT_COMPLEX64
10292 #         type: DT_COMPLEX128
10293 #       }
10294 #     }
10295 #   }
10296 #   attr {
10297 #     name: "Tout"
10298 #     type: "type"
10299 #     default_value {
10300 #       type: DT_FLOAT
10301 #     }
10302 #     allowed_values {
10303 #       list {
10304 #         type: DT_FLOAT
10305 #         type: DT_DOUBLE
10306 #       }
10307 #     }
10308 #   }
10309 # }
10310 # op {
10311 #   name: "Conj"
10312 #   input_arg {
10313 #     name: "input"
10314 #     type_attr: "T"
10315 #   }
10316 #   output_arg {
10317 #     name: "output"
10318 #     type_attr: "T"
10319 #   }
10320 #   attr {
10321 #     name: "T"
10322 #     type: "type"
10323 #     default_value {
10324 #       type: DT_COMPLEX64
10325 #     }
10326 #     allowed_values {
10327 #       list {
10328 #         type: DT_COMPLEX64
10329 #         type: DT_COMPLEX128
10330 #         type: DT_VARIANT
10331 #       }
10332 #     }
10333 #   }
10334 # }
10335 # op {
10336 #   name: "Cos"
10337 #   input_arg {
10338 #     name: "x"
10339 #     type_attr: "T"
10340 #   }
10341 #   output_arg {
10342 #     name: "y"
10343 #     type_attr: "T"
10344 #   }
10345 #   attr {
10346 #     name: "T"
10347 #     type: "type"
10348 #     allowed_values {
10349 #       list {
10350 #         type: DT_BFLOAT16
10351 #         type: DT_HALF
10352 #         type: DT_FLOAT
10353 #         type: DT_DOUBLE
10354 #         type: DT_COMPLEX64
10355 #         type: DT_COMPLEX128
10356 #       }
10357 #     }
10358 #   }
10359 # }
10360 # op {
10361 #   name: "Cosh"
10362 #   input_arg {
10363 #     name: "x"
10364 #     type_attr: "T"
10365 #   }
10366 #   output_arg {
10367 #     name: "y"
10368 #     type_attr: "T"
10369 #   }
10370 #   attr {
10371 #     name: "T"
10372 #     type: "type"
10373 #     allowed_values {
10374 #       list {
10375 #         type: DT_BFLOAT16
10376 #         type: DT_HALF
10377 #         type: DT_FLOAT
10378 #         type: DT_DOUBLE
10379 #         type: DT_COMPLEX64
10380 #         type: DT_COMPLEX128
10381 #       }
10382 #     }
10383 #   }
10384 # }
10385 # op {
10386 #   name: "Cross"
10387 #   input_arg {
10388 #     name: "a"
10389 #     type_attr: "T"
10390 #   }
10391 #   input_arg {
10392 #     name: "b"
10393 #     type_attr: "T"
10394 #   }
10395 #   output_arg {
10396 #     name: "product"
10397 #     type_attr: "T"
10398 #   }
10399 #   attr {
10400 #     name: "T"
10401 #     type: "type"
10402 #     allowed_values {
10403 #       list {
10404 #         type: DT_FLOAT
10405 #         type: DT_DOUBLE
10406 #         type: DT_INT32
10407 #         type: DT_UINT8
10408 #         type: DT_INT16
10409 #         type: DT_INT8
10410 #         type: DT_INT64
10411 #         type: DT_BFLOAT16
10412 #         type: DT_UINT16
10413 #         type: DT_HALF
10414 #         type: DT_UINT32
10415 #         type: DT_UINT64
10416 #       }
10417 #     }
10418 #   }
10419 # }
10420 # op {
10421 #   name: "Cumprod"
10422 #   input_arg {
10423 #     name: "x"
10424 #     type_attr: "T"
10425 #   }
10426 #   input_arg {
10427 #     name: "axis"
10428 #     type_attr: "Tidx"
10429 #   }
10430 #   output_arg {
10431 #     name: "out"
10432 #     type_attr: "T"
10433 #   }
10434 #   attr {
10435 #     name: "exclusive"
10436 #     type: "bool"
10437 #     default_value {
10438 #       b: false
10439 #     }
10440 #   }
10441 #   attr {
10442 #     name: "reverse"
10443 #     type: "bool"
10444 #     default_value {
10445 #       b: false
10446 #     }
10447 #   }
10448 #   attr {
10449 #     name: "T"
10450 #     type: "type"
10451 #     allowed_values {
10452 #       list {
10453 #         type: DT_FLOAT
10454 #         type: DT_DOUBLE
10455 #         type: DT_INT32
10456 #         type: DT_UINT8
10457 #         type: DT_INT16
10458 #         type: DT_INT8
10459 #         type: DT_COMPLEX64
10460 #         type: DT_INT64
10461 #         type: DT_QINT8
10462 #         type: DT_QUINT8
10463 #         type: DT_QINT32
10464 #         type: DT_BFLOAT16
10465 #         type: DT_UINT16
10466 #         type: DT_COMPLEX128
10467 #         type: DT_HALF
10468 #         type: DT_UINT32
10469 #         type: DT_UINT64
10470 #       }
10471 #     }
10472 #   }
10473 #   attr {
10474 #     name: "Tidx"
10475 #     type: "type"
10476 #     default_value {
10477 #       type: DT_INT32
10478 #     }
10479 #     allowed_values {
10480 #       list {
10481 #         type: DT_INT32
10482 #         type: DT_INT64
10483 #       }
10484 #     }
10485 #   }
10486 # }
10487 # op {
10488 #   name: "Cumsum"
10489 #   input_arg {
10490 #     name: "x"
10491 #     type_attr: "T"
10492 #   }
10493 #   input_arg {
10494 #     name: "axis"
10495 #     type_attr: "Tidx"
10496 #   }
10497 #   output_arg {
10498 #     name: "out"
10499 #     type_attr: "T"
10500 #   }
10501 #   attr {
10502 #     name: "exclusive"
10503 #     type: "bool"
10504 #     default_value {
10505 #       b: false
10506 #     }
10507 #   }
10508 #   attr {
10509 #     name: "reverse"
10510 #     type: "bool"
10511 #     default_value {
10512 #       b: false
10513 #     }
10514 #   }
10515 #   attr {
10516 #     name: "T"
10517 #     type: "type"
10518 #     allowed_values {
10519 #       list {
10520 #         type: DT_FLOAT
10521 #         type: DT_DOUBLE
10522 #         type: DT_INT32
10523 #         type: DT_UINT8
10524 #         type: DT_INT16
10525 #         type: DT_INT8
10526 #         type: DT_COMPLEX64
10527 #         type: DT_INT64
10528 #         type: DT_QINT8
10529 #         type: DT_QUINT8
10530 #         type: DT_QINT32
10531 #         type: DT_BFLOAT16
10532 #         type: DT_UINT16
10533 #         type: DT_COMPLEX128
10534 #         type: DT_HALF
10535 #         type: DT_UINT32
10536 #         type: DT_UINT64
10537 #       }
10538 #     }
10539 #   }
10540 #   attr {
10541 #     name: "Tidx"
10542 #     type: "type"
10543 #     default_value {
10544 #       type: DT_INT32
10545 #     }
10546 #     allowed_values {
10547 #       list {
10548 #         type: DT_INT32
10549 #         type: DT_INT64
10550 #       }
10551 #     }
10552 #   }
10553 # }
10554 # op {
10555 #   name: "Digamma"
10556 #   input_arg {
10557 #     name: "x"
10558 #     type_attr: "T"
10559 #   }
10560 #   output_arg {
10561 #     name: "y"
10562 #     type_attr: "T"
10563 #   }
10564 #   attr {
10565 #     name: "T"
10566 #     type: "type"
10567 #     allowed_values {
10568 #       list {
10569 #         type: DT_BFLOAT16
10570 #         type: DT_HALF
10571 #         type: DT_FLOAT
10572 #         type: DT_DOUBLE
10573 #       }
10574 #     }
10575 #   }
10576 # }
10577 # op {
10578 #   name: "Div"
10579 #   input_arg {
10580 #     name: "x"
10581 #     type_attr: "T"
10582 #   }
10583 #   input_arg {
10584 #     name: "y"
10585 #     type_attr: "T"
10586 #   }
10587 #   output_arg {
10588 #     name: "z"
10589 #     type_attr: "T"
10590 #   }
10591 #   attr {
10592 #     name: "T"
10593 #     type: "type"
10594 #     allowed_values {
10595 #       list {
10596 #         type: DT_BFLOAT16
10597 #         type: DT_HALF
10598 #         type: DT_FLOAT
10599 #         type: DT_DOUBLE
10600 #         type: DT_UINT8
10601 #         type: DT_INT8
10602 #         type: DT_UINT16
10603 #         type: DT_INT16
10604 #         type: DT_INT32
10605 #         type: DT_INT64
10606 #         type: DT_COMPLEX64
10607 #         type: DT_COMPLEX128
10608 #       }
10609 #     }
10610 #   }
10611 # }
10612 # op {
10613 #   name: "DivNoNan"
10614 #   input_arg {
10615 #     name: "x"
10616 #     type_attr: "T"
10617 #   }
10618 #   input_arg {
10619 #     name: "y"
10620 #     type_attr: "T"
10621 #   }
10622 #   output_arg {
10623 #     name: "z"
10624 #     type_attr: "T"
10625 #   }
10626 #   attr {
10627 #     name: "T"
10628 #     type: "type"
10629 #     allowed_values {
10630 #       list {
10631 #         type: DT_FLOAT
10632 #         type: DT_DOUBLE
10633 #       }
10634 #     }
10635 #   }
10636 # }
10637 # op {
10638 #   name: "Equal"
10639 #   input_arg {
10640 #     name: "x"
10641 #     type_attr: "T"
10642 #   }
10643 #   input_arg {
10644 #     name: "y"
10645 #     type_attr: "T"
10646 #   }
10647 #   output_arg {
10648 #     name: "z"
10649 #     type: DT_BOOL
10650 #   }
10651 #   attr {
10652 #     name: "T"
10653 #     type: "type"
10654 #     allowed_values {
10655 #       list {
10656 #         type: DT_BFLOAT16
10657 #         type: DT_HALF
10658 #         type: DT_FLOAT
10659 #         type: DT_DOUBLE
10660 #         type: DT_UINT8
10661 #         type: DT_INT8
10662 #         type: DT_INT16
10663 #         type: DT_INT32
10664 #         type: DT_INT64
10665 #         type: DT_COMPLEX64
10666 #         type: DT_QUINT8
10667 #         type: DT_QINT8
10668 #         type: DT_QINT32
10669 #         type: DT_STRING
10670 #         type: DT_BOOL
10671 #         type: DT_COMPLEX128
10672 #       }
10673 #     }
10674 #   }
10675 #   is_commutative: true
10676 # }
10677 # op {
10678 #   name: "Erf"
10679 #   input_arg {
10680 #     name: "x"
10681 #     type_attr: "T"
10682 #   }
10683 #   output_arg {
10684 #     name: "y"
10685 #     type_attr: "T"
10686 #   }
10687 #   attr {
10688 #     name: "T"
10689 #     type: "type"
10690 #     allowed_values {
10691 #       list {
10692 #         type: DT_BFLOAT16
10693 #         type: DT_HALF
10694 #         type: DT_FLOAT
10695 #         type: DT_DOUBLE
10696 #       }
10697 #     }
10698 #   }
10699 # }
10700 # op {
10701 #   name: "Erfc"
10702 #   input_arg {
10703 #     name: "x"
10704 #     type_attr: "T"
10705 #   }
10706 #   output_arg {
10707 #     name: "y"
10708 #     type_attr: "T"
10709 #   }
10710 #   attr {
10711 #     name: "T"
10712 #     type: "type"
10713 #     allowed_values {
10714 #       list {
10715 #         type: DT_BFLOAT16
10716 #         type: DT_HALF
10717 #         type: DT_FLOAT
10718 #         type: DT_DOUBLE
10719 #       }
10720 #     }
10721 #   }
10722 # }
10723 # op {
10724 #   name: "Exp"
10725 #   input_arg {
10726 #     name: "x"
10727 #     type_attr: "T"
10728 #   }
10729 #   output_arg {
10730 #     name: "y"
10731 #     type_attr: "T"
10732 #   }
10733 #   attr {
10734 #     name: "T"
10735 #     type: "type"
10736 #     allowed_values {
10737 #       list {
10738 #         type: DT_BFLOAT16
10739 #         type: DT_HALF
10740 #         type: DT_FLOAT
10741 #         type: DT_DOUBLE
10742 #         type: DT_COMPLEX64
10743 #         type: DT_COMPLEX128
10744 #       }
10745 #     }
10746 #   }
10747 # }
10748 # op {
10749 #   name: "Expm1"
10750 #   input_arg {
10751 #     name: "x"
10752 #     type_attr: "T"
10753 #   }
10754 #   output_arg {
10755 #     name: "y"
10756 #     type_attr: "T"
10757 #   }
10758 #   attr {
10759 #     name: "T"
10760 #     type: "type"
10761 #     allowed_values {
10762 #       list {
10763 #         type: DT_BFLOAT16
10764 #         type: DT_HALF
10765 #         type: DT_FLOAT
10766 #         type: DT_DOUBLE
10767 #         type: DT_COMPLEX64
10768 #         type: DT_COMPLEX128
10769 #       }
10770 #     }
10771 #   }
10772 # }
10773 # op {
10774 #   name: "Floor"
10775 #   input_arg {
10776 #     name: "x"
10777 #     type_attr: "T"
10778 #   }
10779 #   output_arg {
10780 #     name: "y"
10781 #     type_attr: "T"
10782 #   }
10783 #   attr {
10784 #     name: "T"
10785 #     type: "type"
10786 #     allowed_values {
10787 #       list {
10788 #         type: DT_BFLOAT16
10789 #         type: DT_HALF
10790 #         type: DT_FLOAT
10791 #         type: DT_DOUBLE
10792 #       }
10793 #     }
10794 #   }
10795 # }
10796 # op {
10797 #   name: "FloorDiv"
10798 #   input_arg {
10799 #     name: "x"
10800 #     type_attr: "T"
10801 #   }
10802 #   input_arg {
10803 #     name: "y"
10804 #     type_attr: "T"
10805 #   }
10806 #   output_arg {
10807 #     name: "z"
10808 #     type_attr: "T"
10809 #   }
10810 #   attr {
10811 #     name: "T"
10812 #     type: "type"
10813 #     allowed_values {
10814 #       list {
10815 #         type: DT_BFLOAT16
10816 #         type: DT_HALF
10817 #         type: DT_FLOAT
10818 #         type: DT_DOUBLE
10819 #         type: DT_UINT8
10820 #         type: DT_INT8
10821 #         type: DT_UINT16
10822 #         type: DT_INT16
10823 #         type: DT_INT32
10824 #         type: DT_INT64
10825 #         type: DT_COMPLEX64
10826 #         type: DT_COMPLEX128
10827 #       }
10828 #     }
10829 #   }
10830 # }
10831 # op {
10832 #   name: "FloorMod"
10833 #   input_arg {
10834 #     name: "x"
10835 #     type_attr: "T"
10836 #   }
10837 #   input_arg {
10838 #     name: "y"
10839 #     type_attr: "T"
10840 #   }
10841 #   output_arg {
10842 #     name: "z"
10843 #     type_attr: "T"
10844 #   }
10845 #   attr {
10846 #     name: "T"
10847 #     type: "type"
10848 #     allowed_values {
10849 #       list {
10850 #         type: DT_INT32
10851 #         type: DT_INT64
10852 #         type: DT_BFLOAT16
10853 #         type: DT_HALF
10854 #         type: DT_FLOAT
10855 #         type: DT_DOUBLE
10856 #       }
10857 #     }
10858 #   }
10859 # }
10860 # op {
10861 #   name: "Greater"
10862 #   input_arg {
10863 #     name: "x"
10864 #     type_attr: "T"
10865 #   }
10866 #   input_arg {
10867 #     name: "y"
10868 #     type_attr: "T"
10869 #   }
10870 #   output_arg {
10871 #     name: "z"
10872 #     type: DT_BOOL
10873 #   }
10874 #   attr {
10875 #     name: "T"
10876 #     type: "type"
10877 #     allowed_values {
10878 #       list {
10879 #         type: DT_FLOAT
10880 #         type: DT_DOUBLE
10881 #         type: DT_INT32
10882 #         type: DT_UINT8
10883 #         type: DT_INT16
10884 #         type: DT_INT8
10885 #         type: DT_INT64
10886 #         type: DT_BFLOAT16
10887 #         type: DT_UINT16
10888 #         type: DT_HALF
10889 #         type: DT_UINT32
10890 #         type: DT_UINT64
10891 #       }
10892 #     }
10893 #   }
10894 # }
10895 # op {
10896 #   name: "GreaterEqual"
10897 #   input_arg {
10898 #     name: "x"
10899 #     type_attr: "T"
10900 #   }
10901 #   input_arg {
10902 #     name: "y"
10903 #     type_attr: "T"
10904 #   }
10905 #   output_arg {
10906 #     name: "z"
10907 #     type: DT_BOOL
10908 #   }
10909 #   attr {
10910 #     name: "T"
10911 #     type: "type"
10912 #     allowed_values {
10913 #       list {
10914 #         type: DT_FLOAT
10915 #         type: DT_DOUBLE
10916 #         type: DT_INT32
10917 #         type: DT_UINT8
10918 #         type: DT_INT16
10919 #         type: DT_INT8
10920 #         type: DT_INT64
10921 #         type: DT_BFLOAT16
10922 #         type: DT_UINT16
10923 #         type: DT_HALF
10924 #         type: DT_UINT32
10925 #         type: DT_UINT64
10926 #       }
10927 #     }
10928 #   }
10929 # }
10930 # op {
10931 #   name: "HistogramFixedWidth"
10932 #   input_arg {
10933 #     name: "values"
10934 #     type_attr: "T"
10935 #   }
10936 #   input_arg {
10937 #     name: "value_range"
10938 #     type_attr: "T"
10939 #   }
10940 #   input_arg {
10941 #     name: "nbins"
10942 #     type: DT_INT32
10943 #   }
10944 #   output_arg {
10945 #     name: "out"
10946 #     type_attr: "dtype"
10947 #   }
10948 #   attr {
10949 #     name: "T"
10950 #     type: "type"
10951 #     allowed_values {
10952 #       list {
10953 #         type: DT_INT32
10954 #         type: DT_INT64
10955 #         type: DT_FLOAT
10956 #         type: DT_DOUBLE
10957 #       }
10958 #     }
10959 #   }
10960 #   attr {
10961 #     name: "dtype"
10962 #     type: "type"
10963 #     default_value {
10964 #       type: DT_INT32
10965 #     }
10966 #     allowed_values {
10967 #       list {
10968 #         type: DT_INT32
10969 #         type: DT_INT64
10970 #       }
10971 #     }
10972 #   }
10973 # }
10974 # op {
10975 #   name: "Igamma"
10976 #   input_arg {
10977 #     name: "a"
10978 #     type_attr: "T"
10979 #   }
10980 #   input_arg {
10981 #     name: "x"
10982 #     type_attr: "T"
10983 #   }
10984 #   output_arg {
10985 #     name: "z"
10986 #     type_attr: "T"
10987 #   }
10988 #   attr {
10989 #     name: "T"
10990 #     type: "type"
10991 #     allowed_values {
10992 #       list {
10993 #         type: DT_FLOAT
10994 #         type: DT_DOUBLE
10995 #       }
10996 #     }
10997 #   }
10998 # }
10999 # op {
11000 #   name: "IgammaGradA"
11001 #   input_arg {
11002 #     name: "a"
11003 #     type_attr: "T"
11004 #   }
11005 #   input_arg {
11006 #     name: "x"
11007 #     type_attr: "T"
11008 #   }
11009 #   output_arg {
11010 #     name: "z"
11011 #     type_attr: "T"
11012 #   }
11013 #   attr {
11014 #     name: "T"
11015 #     type: "type"
11016 #     allowed_values {
11017 #       list {
11018 #         type: DT_FLOAT
11019 #         type: DT_DOUBLE
11020 #       }
11021 #     }
11022 #   }
11023 # }
11024 # op {
11025 #   name: "Igammac"
11026 #   input_arg {
11027 #     name: "a"
11028 #     type_attr: "T"
11029 #   }
11030 #   input_arg {
11031 #     name: "x"
11032 #     type_attr: "T"
11033 #   }
11034 #   output_arg {
11035 #     name: "z"
11036 #     type_attr: "T"
11037 #   }
11038 #   attr {
11039 #     name: "T"
11040 #     type: "type"
11041 #     allowed_values {
11042 #       list {
11043 #         type: DT_FLOAT
11044 #         type: DT_DOUBLE
11045 #       }
11046 #     }
11047 #   }
11048 # }
11049 # op {
11050 #   name: "Imag"
11051 #   input_arg {
11052 #     name: "input"
11053 #     type_attr: "T"
11054 #   }
11055 #   output_arg {
11056 #     name: "output"
11057 #     type_attr: "Tout"
11058 #   }
11059 #   attr {
11060 #     name: "T"
11061 #     type: "type"
11062 #     default_value {
11063 #       type: DT_COMPLEX64
11064 #     }
11065 #     allowed_values {
11066 #       list {
11067 #         type: DT_COMPLEX64
11068 #         type: DT_COMPLEX128
11069 #       }
11070 #     }
11071 #   }
11072 #   attr {
11073 #     name: "Tout"
11074 #     type: "type"
11075 #     default_value {
11076 #       type: DT_FLOAT
11077 #     }
11078 #     allowed_values {
11079 #       list {
11080 #         type: DT_FLOAT
11081 #         type: DT_DOUBLE
11082 #       }
11083 #     }
11084 #   }
11085 # }
11086 # op {
11087 #   name: "Inv"
11088 #   input_arg {
11089 #     name: "x"
11090 #     type_attr: "T"
11091 #   }
11092 #   output_arg {
11093 #     name: "y"
11094 #     type_attr: "T"
11095 #   }
11096 #   attr {
11097 #     name: "T"
11098 #     type: "type"
11099 #     allowed_values {
11100 #       list {
11101 #         type: DT_BFLOAT16
11102 #         type: DT_HALF
11103 #         type: DT_FLOAT
11104 #         type: DT_DOUBLE
11105 #         type: DT_INT32
11106 #         type: DT_INT64
11107 #         type: DT_COMPLEX64
11108 #         type: DT_COMPLEX128
11109 #       }
11110 #     }
11111 #   }
11112 # }
11113 # op {
11114 #   name: "InvGrad"
11115 #   input_arg {
11116 #     name: "y"
11117 #     type_attr: "T"
11118 #   }
11119 #   input_arg {
11120 #     name: "dy"
11121 #     type_attr: "T"
11122 #   }
11123 #   output_arg {
11124 #     name: "z"
11125 #     type_attr: "T"
11126 #   }
11127 #   attr {
11128 #     name: "T"
11129 #     type: "type"
11130 #     allowed_values {
11131 #       list {
11132 #         type: DT_BFLOAT16
11133 #         type: DT_HALF
11134 #         type: DT_FLOAT
11135 #         type: DT_DOUBLE
11136 #         type: DT_COMPLEX64
11137 #         type: DT_COMPLEX128
11138 #       }
11139 #     }
11140 #   }
11141 # }
11142 # op {
11143 #   name: "IsFinite"
11144 #   input_arg {
11145 #     name: "x"
11146 #     type_attr: "T"
11147 #   }
11148 #   output_arg {
11149 #     name: "y"
11150 #     type: DT_BOOL
11151 #   }
11152 #   attr {
11153 #     name: "T"
11154 #     type: "type"
11155 #     allowed_values {
11156 #       list {
11157 #         type: DT_BFLOAT16
11158 #         type: DT_HALF
11159 #         type: DT_FLOAT
11160 #         type: DT_DOUBLE
11161 #       }
11162 #     }
11163 #   }
11164 # }
11165 # op {
11166 #   name: "IsInf"
11167 #   input_arg {
11168 #     name: "x"
11169 #     type_attr: "T"
11170 #   }
11171 #   output_arg {
11172 #     name: "y"
11173 #     type: DT_BOOL
11174 #   }
11175 #   attr {
11176 #     name: "T"
11177 #     type: "type"
11178 #     allowed_values {
11179 #       list {
11180 #         type: DT_BFLOAT16
11181 #         type: DT_HALF
11182 #         type: DT_FLOAT
11183 #         type: DT_DOUBLE
11184 #       }
11185 #     }
11186 #   }
11187 # }
11188 # op {
11189 #   name: "IsNan"
11190 #   input_arg {
11191 #     name: "x"
11192 #     type_attr: "T"
11193 #   }
11194 #   output_arg {
11195 #     name: "y"
11196 #     type: DT_BOOL
11197 #   }
11198 #   attr {
11199 #     name: "T"
11200 #     type: "type"
11201 #     allowed_values {
11202 #       list {
11203 #         type: DT_BFLOAT16
11204 #         type: DT_HALF
11205 #         type: DT_FLOAT
11206 #         type: DT_DOUBLE
11207 #       }
11208 #     }
11209 #   }
11210 # }
11211 # op {
11212 #   name: "Less"
11213 #   input_arg {
11214 #     name: "x"
11215 #     type_attr: "T"
11216 #   }
11217 #   input_arg {
11218 #     name: "y"
11219 #     type_attr: "T"
11220 #   }
11221 #   output_arg {
11222 #     name: "z"
11223 #     type: DT_BOOL
11224 #   }
11225 #   attr {
11226 #     name: "T"
11227 #     type: "type"
11228 #     allowed_values {
11229 #       list {
11230 #         type: DT_FLOAT
11231 #         type: DT_DOUBLE
11232 #         type: DT_INT32
11233 #         type: DT_UINT8
11234 #         type: DT_INT16
11235 #         type: DT_INT8
11236 #         type: DT_INT64
11237 #         type: DT_BFLOAT16
11238 #         type: DT_UINT16
11239 #         type: DT_HALF
11240 #         type: DT_UINT32
11241 #         type: DT_UINT64
11242 #       }
11243 #     }
11244 #   }
11245 # }
11246 # op {
11247 #   name: "LessEqual"
11248 #   input_arg {
11249 #     name: "x"
11250 #     type_attr: "T"
11251 #   }
11252 #   input_arg {
11253 #     name: "y"
11254 #     type_attr: "T"
11255 #   }
11256 #   output_arg {
11257 #     name: "z"
11258 #     type: DT_BOOL
11259 #   }
11260 #   attr {
11261 #     name: "T"
11262 #     type: "type"
11263 #     allowed_values {
11264 #       list {
11265 #         type: DT_FLOAT
11266 #         type: DT_DOUBLE
11267 #         type: DT_INT32
11268 #         type: DT_UINT8
11269 #         type: DT_INT16
11270 #         type: DT_INT8
11271 #         type: DT_INT64
11272 #         type: DT_BFLOAT16
11273 #         type: DT_UINT16
11274 #         type: DT_HALF
11275 #         type: DT_UINT32
11276 #         type: DT_UINT64
11277 #       }
11278 #     }
11279 #   }
11280 # }
11281 # op {
11282 #   name: "Lgamma"
11283 #   input_arg {
11284 #     name: "x"
11285 #     type_attr: "T"
11286 #   }
11287 #   output_arg {
11288 #     name: "y"
11289 #     type_attr: "T"
11290 #   }
11291 #   attr {
11292 #     name: "T"
11293 #     type: "type"
11294 #     allowed_values {
11295 #       list {
11296 #         type: DT_BFLOAT16
11297 #         type: DT_HALF
11298 #         type: DT_FLOAT
11299 #         type: DT_DOUBLE
11300 #       }
11301 #     }
11302 #   }
11303 # }
11304 # op {
11305 #   name: "LinSpace"
11306 #   input_arg {
11307 #     name: "start"
11308 #     type_attr: "T"
11309 #   }
11310 #   input_arg {
11311 #     name: "stop"
11312 #     type_attr: "T"
11313 #   }
11314 #   input_arg {
11315 #     name: "num"
11316 #     type_attr: "Tidx"
11317 #   }
11318 #   output_arg {
11319 #     name: "output"
11320 #     type_attr: "T"
11321 #   }
11322 #   attr {
11323 #     name: "T"
11324 #     type: "type"
11325 #     allowed_values {
11326 #       list {
11327 #         type: DT_BFLOAT16
11328 #         type: DT_FLOAT
11329 #         type: DT_DOUBLE
11330 #       }
11331 #     }
11332 #   }
11333 #   attr {
11334 #     name: "Tidx"
11335 #     type: "type"
11336 #     default_value {
11337 #       type: DT_INT32
11338 #     }
11339 #     allowed_values {
11340 #       list {
11341 #         type: DT_INT32
11342 #         type: DT_INT64
11343 #       }
11344 #     }
11345 #   }
11346 # }
11347 # op {
11348 #   name: "Log"
11349 #   input_arg {
11350 #     name: "x"
11351 #     type_attr: "T"
11352 #   }
11353 #   output_arg {
11354 #     name: "y"
11355 #     type_attr: "T"
11356 #   }
11357 #   attr {
11358 #     name: "T"
11359 #     type: "type"
11360 #     allowed_values {
11361 #       list {
11362 #         type: DT_BFLOAT16
11363 #         type: DT_HALF
11364 #         type: DT_FLOAT
11365 #         type: DT_DOUBLE
11366 #         type: DT_COMPLEX64
11367 #         type: DT_COMPLEX128
11368 #       }
11369 #     }
11370 #   }
11371 # }
11372 # op {
11373 #   name: "Log1p"
11374 #   input_arg {
11375 #     name: "x"
11376 #     type_attr: "T"
11377 #   }
11378 #   output_arg {
11379 #     name: "y"
11380 #     type_attr: "T"
11381 #   }
11382 #   attr {
11383 #     name: "T"
11384 #     type: "type"
11385 #     allowed_values {
11386 #       list {
11387 #         type: DT_BFLOAT16
11388 #         type: DT_HALF
11389 #         type: DT_FLOAT
11390 #         type: DT_DOUBLE
11391 #         type: DT_COMPLEX64
11392 #         type: DT_COMPLEX128
11393 #       }
11394 #     }
11395 #   }
11396 # }
11397 # op {
11398 #   name: "LogicalAnd"
11399 #   input_arg {
11400 #     name: "x"
11401 #     type: DT_BOOL
11402 #   }
11403 #   input_arg {
11404 #     name: "y"
11405 #     type: DT_BOOL
11406 #   }
11407 #   output_arg {
11408 #     name: "z"
11409 #     type: DT_BOOL
11410 #   }
11411 #   is_commutative: true
11412 # }
11413 # op {
11414 #   name: "LogicalNot"
11415 #   input_arg {
11416 #     name: "x"
11417 #     type: DT_BOOL
11418 #   }
11419 #   output_arg {
11420 #     name: "y"
11421 #     type: DT_BOOL
11422 #   }
11423 # }
11424 # op {
11425 #   name: "LogicalOr"
11426 #   input_arg {
11427 #     name: "x"
11428 #     type: DT_BOOL
11429 #   }
11430 #   input_arg {
11431 #     name: "y"
11432 #     type: DT_BOOL
11433 #   }
11434 #   output_arg {
11435 #     name: "z"
11436 #     type: DT_BOOL
11437 #   }
11438 #   is_commutative: true
11439 # }
11440 # op {
11441 #   name: "MatMul"
11442 #   input_arg {
11443 #     name: "a"
11444 #     type_attr: "T"
11445 #   }
11446 #   input_arg {
11447 #     name: "b"
11448 #     type_attr: "T"
11449 #   }
11450 #   output_arg {
11451 #     name: "product"
11452 #     type_attr: "T"
11453 #   }
11454 #   attr {
11455 #     name: "transpose_a"
11456 #     type: "bool"
11457 #     default_value {
11458 #       b: false
11459 #     }
11460 #   }
11461 #   attr {
11462 #     name: "transpose_b"
11463 #     type: "bool"
11464 #     default_value {
11465 #       b: false
11466 #     }
11467 #   }
11468 #   attr {
11469 #     name: "T"
11470 #     type: "type"
11471 #     allowed_values {
11472 #       list {
11473 #         type: DT_BFLOAT16
11474 #         type: DT_HALF
11475 #         type: DT_FLOAT
11476 #         type: DT_DOUBLE
11477 #         type: DT_INT32
11478 #         type: DT_COMPLEX64
11479 #         type: DT_COMPLEX128
11480 #       }
11481 #     }
11482 #   }
11483 # }
11484 # op {
11485 #   name: "Max"
11486 #   input_arg {
11487 #     name: "input"
11488 #     type_attr: "T"
11489 #   }
11490 #   input_arg {
11491 #     name: "reduction_indices"
11492 #     type_attr: "Tidx"
11493 #   }
11494 #   output_arg {
11495 #     name: "output"
11496 #     type_attr: "T"
11497 #   }
11498 #   attr {
11499 #     name: "keep_dims"
11500 #     type: "bool"
11501 #     default_value {
11502 #       b: false
11503 #     }
11504 #   }
11505 #   attr {
11506 #     name: "T"
11507 #     type: "type"
11508 #     allowed_values {
11509 #       list {
11510 #         type: DT_FLOAT
11511 #         type: DT_DOUBLE
11512 #         type: DT_INT32
11513 #         type: DT_UINT8
11514 #         type: DT_INT16
11515 #         type: DT_INT8
11516 #         type: DT_COMPLEX64
11517 #         type: DT_INT64
11518 #         type: DT_QINT8
11519 #         type: DT_QUINT8
11520 #         type: DT_QINT32
11521 #         type: DT_BFLOAT16
11522 #         type: DT_UINT16
11523 #         type: DT_COMPLEX128
11524 #         type: DT_HALF
11525 #         type: DT_UINT32
11526 #         type: DT_UINT64
11527 #       }
11528 #     }
11529 #   }
11530 #   attr {
11531 #     name: "Tidx"
11532 #     type: "type"
11533 #     default_value {
11534 #       type: DT_INT32
11535 #     }
11536 #     allowed_values {
11537 #       list {
11538 #         type: DT_INT32
11539 #         type: DT_INT64
11540 #       }
11541 #     }
11542 #   }
11543 # }
11544 # op {
11545 #   name: "Maximum"
11546 #   input_arg {
11547 #     name: "x"
11548 #     type_attr: "T"
11549 #   }
11550 #   input_arg {
11551 #     name: "y"
11552 #     type_attr: "T"
11553 #   }
11554 #   output_arg {
11555 #     name: "z"
11556 #     type_attr: "T"
11557 #   }
11558 #   attr {
11559 #     name: "T"
11560 #     type: "type"
11561 #     allowed_values {
11562 #       list {
11563 #         type: DT_BFLOAT16
11564 #         type: DT_HALF
11565 #         type: DT_FLOAT
11566 #         type: DT_DOUBLE
11567 #         type: DT_INT32
11568 #         type: DT_INT64
11569 #       }
11570 #     }
11571 #   }
11572 #   is_commutative: true
11573 # }
11574 # op {
11575 #   name: "Mean"
11576 #   input_arg {
11577 #     name: "input"
11578 #     type_attr: "T"
11579 #   }
11580 #   input_arg {
11581 #     name: "reduction_indices"
11582 #     type_attr: "Tidx"
11583 #   }
11584 #   output_arg {
11585 #     name: "output"
11586 #     type_attr: "T"
11587 #   }
11588 #   attr {
11589 #     name: "keep_dims"
11590 #     type: "bool"
11591 #     default_value {
11592 #       b: false
11593 #     }
11594 #   }
11595 #   attr {
11596 #     name: "T"
11597 #     type: "type"
11598 #     allowed_values {
11599 #       list {
11600 #         type: DT_FLOAT
11601 #         type: DT_DOUBLE
11602 #         type: DT_INT32
11603 #         type: DT_UINT8
11604 #         type: DT_INT16
11605 #         type: DT_INT8
11606 #         type: DT_COMPLEX64
11607 #         type: DT_INT64
11608 #         type: DT_QINT8
11609 #         type: DT_QUINT8
11610 #         type: DT_QINT32
11611 #         type: DT_BFLOAT16
11612 #         type: DT_UINT16
11613 #         type: DT_COMPLEX128
11614 #         type: DT_HALF
11615 #         type: DT_UINT32
11616 #         type: DT_UINT64
11617 #       }
11618 #     }
11619 #   }
11620 #   attr {
11621 #     name: "Tidx"
11622 #     type: "type"
11623 #     default_value {
11624 #       type: DT_INT32
11625 #     }
11626 #     allowed_values {
11627 #       list {
11628 #         type: DT_INT32
11629 #         type: DT_INT64
11630 #       }
11631 #     }
11632 #   }
11633 # }
11634 # op {
11635 #   name: "Min"
11636 #   input_arg {
11637 #     name: "input"
11638 #     type_attr: "T"
11639 #   }
11640 #   input_arg {
11641 #     name: "reduction_indices"
11642 #     type_attr: "Tidx"
11643 #   }
11644 #   output_arg {
11645 #     name: "output"
11646 #     type_attr: "T"
11647 #   }
11648 #   attr {
11649 #     name: "keep_dims"
11650 #     type: "bool"
11651 #     default_value {
11652 #       b: false
11653 #     }
11654 #   }
11655 #   attr {
11656 #     name: "T"
11657 #     type: "type"
11658 #     allowed_values {
11659 #       list {
11660 #         type: DT_FLOAT
11661 #         type: DT_DOUBLE
11662 #         type: DT_INT32
11663 #         type: DT_UINT8
11664 #         type: DT_INT16
11665 #         type: DT_INT8
11666 #         type: DT_COMPLEX64
11667 #         type: DT_INT64
11668 #         type: DT_QINT8
11669 #         type: DT_QUINT8
11670 #         type: DT_QINT32
11671 #         type: DT_BFLOAT16
11672 #         type: DT_UINT16
11673 #         type: DT_COMPLEX128
11674 #         type: DT_HALF
11675 #         type: DT_UINT32
11676 #         type: DT_UINT64
11677 #       }
11678 #     }
11679 #   }
11680 #   attr {
11681 #     name: "Tidx"
11682 #     type: "type"
11683 #     default_value {
11684 #       type: DT_INT32
11685 #     }
11686 #     allowed_values {
11687 #       list {
11688 #         type: DT_INT32
11689 #         type: DT_INT64
11690 #       }
11691 #     }
11692 #   }
11693 # }
11694 # op {
11695 #   name: "Minimum"
11696 #   input_arg {
11697 #     name: "x"
11698 #     type_attr: "T"
11699 #   }
11700 #   input_arg {
11701 #     name: "y"
11702 #     type_attr: "T"
11703 #   }
11704 #   output_arg {
11705 #     name: "z"
11706 #     type_attr: "T"
11707 #   }
11708 #   attr {
11709 #     name: "T"
11710 #     type: "type"
11711 #     allowed_values {
11712 #       list {
11713 #         type: DT_BFLOAT16
11714 #         type: DT_HALF
11715 #         type: DT_FLOAT
11716 #         type: DT_DOUBLE
11717 #         type: DT_INT32
11718 #         type: DT_INT64
11719 #       }
11720 #     }
11721 #   }
11722 #   is_commutative: true
11723 # }
11724 # op {
11725 #   name: "Mod"
11726 #   input_arg {
11727 #     name: "x"
11728 #     type_attr: "T"
11729 #   }
11730 #   input_arg {
11731 #     name: "y"
11732 #     type_attr: "T"
11733 #   }
11734 #   output_arg {
11735 #     name: "z"
11736 #     type_attr: "T"
11737 #   }
11738 #   attr {
11739 #     name: "T"
11740 #     type: "type"
11741 #     allowed_values {
11742 #       list {
11743 #         type: DT_INT32
11744 #         type: DT_INT64
11745 #         type: DT_HALF
11746 #         type: DT_HALF
11747 #         type: DT_BFLOAT16
11748 #         type: DT_FLOAT
11749 #         type: DT_DOUBLE
11750 #       }
11751 #     }
11752 #   }
11753 # }
11754 # op {
11755 #   name: "Mul"
11756 #   input_arg {
11757 #     name: "x"
11758 #     type_attr: "T"
11759 #   }
11760 #   input_arg {
11761 #     name: "y"
11762 #     type_attr: "T"
11763 #   }
11764 #   output_arg {
11765 #     name: "z"
11766 #     type_attr: "T"
11767 #   }
11768 #   attr {
11769 #     name: "T"
11770 #     type: "type"
11771 #     allowed_values {
11772 #       list {
11773 #         type: DT_BFLOAT16
11774 #         type: DT_HALF
11775 #         type: DT_FLOAT
11776 #         type: DT_DOUBLE
11777 #         type: DT_UINT8
11778 #         type: DT_INT8
11779 #         type: DT_UINT16
11780 #         type: DT_INT16
11781 #         type: DT_INT32
11782 #         type: DT_INT64
11783 #         type: DT_COMPLEX64
11784 #         type: DT_COMPLEX128
11785 #       }
11786 #     }
11787 #   }
11788 #   is_commutative: true
11789 # }
11790 # op {
11791 #   name: "Neg"
11792 #   input_arg {
11793 #     name: "x"
11794 #     type_attr: "T"
11795 #   }
11796 #   output_arg {
11797 #     name: "y"
11798 #     type_attr: "T"
11799 #   }
11800 #   attr {
11801 #     name: "T"
11802 #     type: "type"
11803 #     allowed_values {
11804 #       list {
11805 #         type: DT_BFLOAT16
11806 #         type: DT_HALF
11807 #         type: DT_FLOAT
11808 #         type: DT_DOUBLE
11809 #         type: DT_INT32
11810 #         type: DT_INT64
11811 #         type: DT_COMPLEX64
11812 #         type: DT_COMPLEX128
11813 #       }
11814 #     }
11815 #   }
11816 # }
11817 # op {
11818 #   name: "NotEqual"
11819 #   input_arg {
11820 #     name: "x"
11821 #     type_attr: "T"
11822 #   }
11823 #   input_arg {
11824 #     name: "y"
11825 #     type_attr: "T"
11826 #   }
11827 #   output_arg {
11828 #     name: "z"
11829 #     type: DT_BOOL
11830 #   }
11831 #   attr {
11832 #     name: "T"
11833 #     type: "type"
11834 #     allowed_values {
11835 #       list {
11836 #         type: DT_BFLOAT16
11837 #         type: DT_HALF
11838 #         type: DT_FLOAT
11839 #         type: DT_DOUBLE
11840 #         type: DT_UINT8
11841 #         type: DT_INT8
11842 #         type: DT_INT16
11843 #         type: DT_INT32
11844 #         type: DT_INT64
11845 #         type: DT_COMPLEX64
11846 #         type: DT_QUINT8
11847 #         type: DT_QINT8
11848 #         type: DT_QINT32
11849 #         type: DT_STRING
11850 #         type: DT_BOOL
11851 #         type: DT_COMPLEX128
11852 #       }
11853 #     }
11854 #   }
11855 #   is_commutative: true
11856 # }
11857 # op {
11858 #   name: "Polygamma"
11859 #   input_arg {
11860 #     name: "a"
11861 #     type_attr: "T"
11862 #   }
11863 #   input_arg {
11864 #     name: "x"
11865 #     type_attr: "T"
11866 #   }
11867 #   output_arg {
11868 #     name: "z"
11869 #     type_attr: "T"
11870 #   }
11871 #   attr {
11872 #     name: "T"
11873 #     type: "type"
11874 #     allowed_values {
11875 #       list {
11876 #         type: DT_FLOAT
11877 #         type: DT_DOUBLE
11878 #       }
11879 #     }
11880 #   }
11881 # }
11882 # op {
11883 #   name: "Pow"
11884 #   input_arg {
11885 #     name: "x"
11886 #     type_attr: "T"
11887 #   }
11888 #   input_arg {
11889 #     name: "y"
11890 #     type_attr: "T"
11891 #   }
11892 #   output_arg {
11893 #     name: "z"
11894 #     type_attr: "T"
11895 #   }
11896 #   attr {
11897 #     name: "T"
11898 #     type: "type"
11899 #     allowed_values {
11900 #       list {
11901 #         type: DT_BFLOAT16
11902 #         type: DT_FLOAT
11903 #         type: DT_HALF
11904 #         type: DT_DOUBLE
11905 #         type: DT_INT32
11906 #         type: DT_INT64
11907 #         type: DT_COMPLEX64
11908 #         type: DT_COMPLEX128
11909 #       }
11910 #     }
11911 #   }
11912 # }
11913 # op {
11914 #   name: "Prod"
11915 #   input_arg {
11916 #     name: "input"
11917 #     type_attr: "T"
11918 #   }
11919 #   input_arg {
11920 #     name: "reduction_indices"
11921 #     type_attr: "Tidx"
11922 #   }
11923 #   output_arg {
11924 #     name: "output"
11925 #     type_attr: "T"
11926 #   }
11927 #   attr {
11928 #     name: "keep_dims"
11929 #     type: "bool"
11930 #     default_value {
11931 #       b: false
11932 #     }
11933 #   }
11934 #   attr {
11935 #     name: "T"
11936 #     type: "type"
11937 #     allowed_values {
11938 #       list {
11939 #         type: DT_FLOAT
11940 #         type: DT_DOUBLE
11941 #         type: DT_INT32
11942 #         type: DT_UINT8
11943 #         type: DT_INT16
11944 #         type: DT_INT8
11945 #         type: DT_COMPLEX64
11946 #         type: DT_INT64
11947 #         type: DT_QINT8
11948 #         type: DT_QUINT8
11949 #         type: DT_QINT32
11950 #         type: DT_BFLOAT16
11951 #         type: DT_UINT16
11952 #         type: DT_COMPLEX128
11953 #         type: DT_HALF
11954 #         type: DT_UINT32
11955 #         type: DT_UINT64
11956 #       }
11957 #     }
11958 #   }
11959 #   attr {
11960 #     name: "Tidx"
11961 #     type: "type"
11962 #     default_value {
11963 #       type: DT_INT32
11964 #     }
11965 #     allowed_values {
11966 #       list {
11967 #         type: DT_INT32
11968 #         type: DT_INT64
11969 #       }
11970 #     }
11971 #   }
11972 # }
11973 # op {
11974 #   name: "QuantizeDownAndShrinkRange"
11975 #   input_arg {
11976 #     name: "input"
11977 #     type_attr: "Tinput"
11978 #   }
11979 #   input_arg {
11980 #     name: "input_min"
11981 #     type: DT_FLOAT
11982 #   }
11983 #   input_arg {
11984 #     name: "input_max"
11985 #     type: DT_FLOAT
11986 #   }
11987 #   output_arg {
11988 #     name: "output"
11989 #     type_attr: "out_type"
11990 #   }
11991 #   output_arg {
11992 #     name: "output_min"
11993 #     type: DT_FLOAT
11994 #   }
11995 #   output_arg {
11996 #     name: "output_max"
11997 #     type: DT_FLOAT
11998 #   }
11999 #   attr {
12000 #     name: "Tinput"
12001 #     type: "type"
12002 #     allowed_values {
12003 #       list {
12004 #         type: DT_QINT8
12005 #         type: DT_QUINT8
12006 #         type: DT_QINT32
12007 #         type: DT_QINT16
12008 #         type: DT_QUINT16
12009 #       }
12010 #     }
12011 #   }
12012 #   attr {
12013 #     name: "out_type"
12014 #     type: "type"
12015 #     allowed_values {
12016 #       list {
12017 #         type: DT_QINT8
12018 #         type: DT_QUINT8
12019 #         type: DT_QINT32
12020 #         type: DT_QINT16
12021 #         type: DT_QUINT16
12022 #       }
12023 #     }
12024 #   }
12025 # }
12026 # op {
12027 #   name: "QuantizedAdd"
12028 #   input_arg {
12029 #     name: "x"
12030 #     type_attr: "T1"
12031 #   }
12032 #   input_arg {
12033 #     name: "y"
12034 #     type_attr: "T2"
12035 #   }
12036 #   input_arg {
12037 #     name: "min_x"
12038 #     type: DT_FLOAT
12039 #   }
12040 #   input_arg {
12041 #     name: "max_x"
12042 #     type: DT_FLOAT
12043 #   }
12044 #   input_arg {
12045 #     name: "min_y"
12046 #     type: DT_FLOAT
12047 #   }
12048 #   input_arg {
12049 #     name: "max_y"
12050 #     type: DT_FLOAT
12051 #   }
12052 #   output_arg {
12053 #     name: "z"
12054 #     type_attr: "Toutput"
12055 #   }
12056 #   output_arg {
12057 #     name: "min_z"
12058 #     type: DT_FLOAT
12059 #   }
12060 #   output_arg {
12061 #     name: "max_z"
12062 #     type: DT_FLOAT
12063 #   }
12064 #   attr {
12065 #     name: "T1"
12066 #     type: "type"
12067 #     allowed_values {
12068 #       list {
12069 #         type: DT_QINT8
12070 #         type: DT_QUINT8
12071 #         type: DT_QINT32
12072 #         type: DT_QINT16
12073 #         type: DT_QUINT16
12074 #       }
12075 #     }
12076 #   }
12077 #   attr {
12078 #     name: "T2"
12079 #     type: "type"
12080 #     allowed_values {
12081 #       list {
12082 #         type: DT_QINT8
12083 #         type: DT_QUINT8
12084 #         type: DT_QINT32
12085 #         type: DT_QINT16
12086 #         type: DT_QUINT16
12087 #       }
12088 #     }
12089 #   }
12090 #   attr {
12091 #     name: "Toutput"
12092 #     type: "type"
12093 #     default_value {
12094 #       type: DT_QINT32
12095 #     }
12096 #     allowed_values {
12097 #       list {
12098 #         type: DT_QINT8
12099 #         type: DT_QUINT8
12100 #         type: DT_QINT32
12101 #         type: DT_QINT16
12102 #         type: DT_QUINT16
12103 #       }
12104 #     }
12105 #   }
12106 #   is_commutative: true
12107 # }
12108 # op {
12109 #   name: "QuantizedMatMul"
12110 #   input_arg {
12111 #     name: "a"
12112 #     type_attr: "T1"
12113 #   }
12114 #   input_arg {
12115 #     name: "b"
12116 #     type_attr: "T2"
12117 #   }
12118 #   input_arg {
12119 #     name: "min_a"
12120 #     type: DT_FLOAT
12121 #   }
12122 #   input_arg {
12123 #     name: "max_a"
12124 #     type: DT_FLOAT
12125 #   }
12126 #   input_arg {
12127 #     name: "min_b"
12128 #     type: DT_FLOAT
12129 #   }
12130 #   input_arg {
12131 #     name: "max_b"
12132 #     type: DT_FLOAT
12133 #   }
12134 #   output_arg {
12135 #     name: "out"
12136 #     type_attr: "Toutput"
12137 #   }
12138 #   output_arg {
12139 #     name: "min_out"
12140 #     type: DT_FLOAT
12141 #   }
12142 #   output_arg {
12143 #     name: "max_out"
12144 #     type: DT_FLOAT
12145 #   }
12146 #   attr {
12147 #     name: "T1"
12148 #     type: "type"
12149 #     allowed_values {
12150 #       list {
12151 #         type: DT_QINT8
12152 #         type: DT_QUINT8
12153 #         type: DT_QINT32
12154 #         type: DT_QINT16
12155 #         type: DT_QUINT16
12156 #       }
12157 #     }
12158 #   }
12159 #   attr {
12160 #     name: "T2"
12161 #     type: "type"
12162 #     allowed_values {
12163 #       list {
12164 #         type: DT_QINT8
12165 #         type: DT_QUINT8
12166 #         type: DT_QINT32
12167 #         type: DT_QINT16
12168 #         type: DT_QUINT16
12169 #       }
12170 #     }
12171 #   }
12172 #   attr {
12173 #     name: "Toutput"
12174 #     type: "type"
12175 #     default_value {
12176 #       type: DT_QINT32
12177 #     }
12178 #     allowed_values {
12179 #       list {
12180 #         type: DT_QINT8
12181 #         type: DT_QUINT8
12182 #         type: DT_QINT32
12183 #         type: DT_QINT16
12184 #         type: DT_QUINT16
12185 #       }
12186 #     }
12187 #   }
12188 #   attr {
12189 #     name: "transpose_a"
12190 #     type: "bool"
12191 #     default_value {
12192 #       b: false
12193 #     }
12194 #   }
12195 #   attr {
12196 #     name: "transpose_b"
12197 #     type: "bool"
12198 #     default_value {
12199 #       b: false
12200 #     }
12201 #   }
12202 #   attr {
12203 #     name: "Tactivation"
12204 #     type: "type"
12205 #     default_value {
12206 #       type: DT_QUINT8
12207 #     }
12208 #     allowed_values {
12209 #       list {
12210 #         type: DT_QINT8
12211 #         type: DT_QUINT8
12212 #         type: DT_QINT32
12213 #         type: DT_QINT16
12214 #         type: DT_QUINT16
12215 #       }
12216 #     }
12217 #   }
12218 # }
12219 # op {
12220 #   name: "QuantizedMul"
12221 #   input_arg {
12222 #     name: "x"
12223 #     type_attr: "T1"
12224 #   }
12225 #   input_arg {
12226 #     name: "y"
12227 #     type_attr: "T2"
12228 #   }
12229 #   input_arg {
12230 #     name: "min_x"
12231 #     type: DT_FLOAT
12232 #   }
12233 #   input_arg {
12234 #     name: "max_x"
12235 #     type: DT_FLOAT
12236 #   }
12237 #   input_arg {
12238 #     name: "min_y"
12239 #     type: DT_FLOAT
12240 #   }
12241 #   input_arg {
12242 #     name: "max_y"
12243 #     type: DT_FLOAT
12244 #   }
12245 #   output_arg {
12246 #     name: "z"
12247 #     type_attr: "Toutput"
12248 #   }
12249 #   output_arg {
12250 #     name: "min_z"
12251 #     type: DT_FLOAT
12252 #   }
12253 #   output_arg {
12254 #     name: "max_z"
12255 #     type: DT_FLOAT
12256 #   }
12257 #   attr {
12258 #     name: "T1"
12259 #     type: "type"
12260 #     allowed_values {
12261 #       list {
12262 #         type: DT_QINT8
12263 #         type: DT_QUINT8
12264 #         type: DT_QINT32
12265 #         type: DT_QINT16
12266 #         type: DT_QUINT16
12267 #       }
12268 #     }
12269 #   }
12270 #   attr {
12271 #     name: "T2"
12272 #     type: "type"
12273 #     allowed_values {
12274 #       list {
12275 #         type: DT_QINT8
12276 #         type: DT_QUINT8
12277 #         type: DT_QINT32
12278 #         type: DT_QINT16
12279 #         type: DT_QUINT16
12280 #       }
12281 #     }
12282 #   }
12283 #   attr {
12284 #     name: "Toutput"
12285 #     type: "type"
12286 #     default_value {
12287 #       type: DT_QINT32
12288 #     }
12289 #     allowed_values {
12290 #       list {
12291 #         type: DT_QINT8
12292 #         type: DT_QUINT8
12293 #         type: DT_QINT32
12294 #         type: DT_QINT16
12295 #         type: DT_QUINT16
12296 #       }
12297 #     }
12298 #   }
12299 #   is_commutative: true
12300 # }
12301 # op {
12302 #   name: "Range"
12303 #   input_arg {
12304 #     name: "start"
12305 #     type_attr: "Tidx"
12306 #   }
12307 #   input_arg {
12308 #     name: "limit"
12309 #     type_attr: "Tidx"
12310 #   }
12311 #   input_arg {
12312 #     name: "delta"
12313 #     type_attr: "Tidx"
12314 #   }
12315 #   output_arg {
12316 #     name: "output"
12317 #     type_attr: "Tidx"
12318 #   }
12319 #   attr {
12320 #     name: "Tidx"
12321 #     type: "type"
12322 #     default_value {
12323 #       type: DT_INT32
12324 #     }
12325 #     allowed_values {
12326 #       list {
12327 #         type: DT_BFLOAT16
12328 #         type: DT_FLOAT
12329 #         type: DT_DOUBLE
12330 #         type: DT_INT32
12331 #         type: DT_INT64
12332 #       }
12333 #     }
12334 #   }
12335 # }
12336 # op {
12337 #   name: "Real"
12338 #   input_arg {
12339 #     name: "input"
12340 #     type_attr: "T"
12341 #   }
12342 #   output_arg {
12343 #     name: "output"
12344 #     type_attr: "Tout"
12345 #   }
12346 #   attr {
12347 #     name: "T"
12348 #     type: "type"
12349 #     default_value {
12350 #       type: DT_COMPLEX64
12351 #     }
12352 #     allowed_values {
12353 #       list {
12354 #         type: DT_COMPLEX64
12355 #         type: DT_COMPLEX128
12356 #       }
12357 #     }
12358 #   }
12359 #   attr {
12360 #     name: "Tout"
12361 #     type: "type"
12362 #     default_value {
12363 #       type: DT_FLOAT
12364 #     }
12365 #     allowed_values {
12366 #       list {
12367 #         type: DT_FLOAT
12368 #         type: DT_DOUBLE
12369 #       }
12370 #     }
12371 #   }
12372 # }
12373 # op {
12374 #   name: "RealDiv"
12375 #   input_arg {
12376 #     name: "x"
12377 #     type_attr: "T"
12378 #   }
12379 #   input_arg {
12380 #     name: "y"
12381 #     type_attr: "T"
12382 #   }
12383 #   output_arg {
12384 #     name: "z"
12385 #     type_attr: "T"
12386 #   }
12387 #   attr {
12388 #     name: "T"
12389 #     type: "type"
12390 #     allowed_values {
12391 #       list {
12392 #         type: DT_BFLOAT16
12393 #         type: DT_HALF
12394 #         type: DT_FLOAT
12395 #         type: DT_DOUBLE
12396 #         type: DT_UINT8
12397 #         type: DT_INT8
12398 #         type: DT_UINT16
12399 #         type: DT_INT16
12400 #         type: DT_INT32
12401 #         type: DT_INT64
12402 #         type: DT_COMPLEX64
12403 #         type: DT_COMPLEX128
12404 #       }
12405 #     }
12406 #   }
12407 # }
12408 # op {
12409 #   name: "Reciprocal"
12410 #   input_arg {
12411 #     name: "x"
12412 #     type_attr: "T"
12413 #   }
12414 #   output_arg {
12415 #     name: "y"
12416 #     type_attr: "T"
12417 #   }
12418 #   attr {
12419 #     name: "T"
12420 #     type: "type"
12421 #     allowed_values {
12422 #       list {
12423 #         type: DT_BFLOAT16
12424 #         type: DT_HALF
12425 #         type: DT_FLOAT
12426 #         type: DT_DOUBLE
12427 #         type: DT_INT32
12428 #         type: DT_INT64
12429 #         type: DT_COMPLEX64
12430 #         type: DT_COMPLEX128
12431 #       }
12432 #     }
12433 #   }
12434 # }
12435 # op {
12436 #   name: "ReciprocalGrad"
12437 #   input_arg {
12438 #     name: "y"
12439 #     type_attr: "T"
12440 #   }
12441 #   input_arg {
12442 #     name: "dy"
12443 #     type_attr: "T"
12444 #   }
12445 #   output_arg {
12446 #     name: "z"
12447 #     type_attr: "T"
12448 #   }
12449 #   attr {
12450 #     name: "T"
12451 #     type: "type"
12452 #     allowed_values {
12453 #       list {
12454 #         type: DT_BFLOAT16
12455 #         type: DT_HALF
12456 #         type: DT_FLOAT
12457 #         type: DT_DOUBLE
12458 #         type: DT_COMPLEX64
12459 #         type: DT_COMPLEX128
12460 #       }
12461 #     }
12462 #   }
12463 # }
12464 # op {
12465 #   name: "RequantizationRange"
12466 #   input_arg {
12467 #     name: "input"
12468 #     type_attr: "Tinput"
12469 #   }
12470 #   input_arg {
12471 #     name: "input_min"
12472 #     type: DT_FLOAT
12473 #   }
12474 #   input_arg {
12475 #     name: "input_max"
12476 #     type: DT_FLOAT
12477 #   }
12478 #   output_arg {
12479 #     name: "output_min"
12480 #     type: DT_FLOAT
12481 #   }
12482 #   output_arg {
12483 #     name: "output_max"
12484 #     type: DT_FLOAT
12485 #   }
12486 #   attr {
12487 #     name: "Tinput"
12488 #     type: "type"
12489 #     allowed_values {
12490 #       list {
12491 #         type: DT_QINT8
12492 #         type: DT_QUINT8
12493 #         type: DT_QINT32
12494 #         type: DT_QINT16
12495 #         type: DT_QUINT16
12496 #       }
12497 #     }
12498 #   }
12499 # }
12500 # op {
12501 #   name: "Requantize"
12502 #   input_arg {
12503 #     name: "input"
12504 #     type_attr: "Tinput"
12505 #   }
12506 #   input_arg {
12507 #     name: "input_min"
12508 #     type: DT_FLOAT
12509 #   }
12510 #   input_arg {
12511 #     name: "input_max"
12512 #     type: DT_FLOAT
12513 #   }
12514 #   input_arg {
12515 #     name: "requested_output_min"
12516 #     type: DT_FLOAT
12517 #   }
12518 #   input_arg {
12519 #     name: "requested_output_max"
12520 #     type: DT_FLOAT
12521 #   }
12522 #   output_arg {
12523 #     name: "output"
12524 #     type_attr: "out_type"
12525 #   }
12526 #   output_arg {
12527 #     name: "output_min"
12528 #     type: DT_FLOAT
12529 #   }
12530 #   output_arg {
12531 #     name: "output_max"
12532 #     type: DT_FLOAT
12533 #   }
12534 #   attr {
12535 #     name: "Tinput"
12536 #     type: "type"
12537 #     allowed_values {
12538 #       list {
12539 #         type: DT_QINT8
12540 #         type: DT_QUINT8
12541 #         type: DT_QINT32
12542 #         type: DT_QINT16
12543 #         type: DT_QUINT16
12544 #       }
12545 #     }
12546 #   }
12547 #   attr {
12548 #     name: "out_type"
12549 #     type: "type"
12550 #     allowed_values {
12551 #       list {
12552 #         type: DT_QINT8
12553 #         type: DT_QUINT8
12554 #         type: DT_QINT32
12555 #         type: DT_QINT16
12556 #         type: DT_QUINT16
12557 #       }
12558 #     }
12559 #   }
12560 # }
12561 # op {
12562 #   name: "Rint"
12563 #   input_arg {
12564 #     name: "x"
12565 #     type_attr: "T"
12566 #   }
12567 #   output_arg {
12568 #     name: "y"
12569 #     type_attr: "T"
12570 #   }
12571 #   attr {
12572 #     name: "T"
12573 #     type: "type"
12574 #     allowed_values {
12575 #       list {
12576 #         type: DT_BFLOAT16
12577 #         type: DT_HALF
12578 #         type: DT_FLOAT
12579 #         type: DT_DOUBLE
12580 #       }
12581 #     }
12582 #   }
12583 # }
12584 # op {
12585 #   name: "Round"
12586 #   input_arg {
12587 #     name: "x"
12588 #     type_attr: "T"
12589 #   }
12590 #   output_arg {
12591 #     name: "y"
12592 #     type_attr: "T"
12593 #   }
12594 #   attr {
12595 #     name: "T"
12596 #     type: "type"
12597 #     allowed_values {
12598 #       list {
12599 #         type: DT_BFLOAT16
12600 #         type: DT_HALF
12601 #         type: DT_FLOAT
12602 #         type: DT_DOUBLE
12603 #         type: DT_INT32
12604 #         type: DT_INT64
12605 #         type: DT_COMPLEX64
12606 #         type: DT_COMPLEX128
12607 #       }
12608 #     }
12609 #   }
12610 # }
12611 # op {
12612 #   name: "Rsqrt"
12613 #   input_arg {
12614 #     name: "x"
12615 #     type_attr: "T"
12616 #   }
12617 #   output_arg {
12618 #     name: "y"
12619 #     type_attr: "T"
12620 #   }
12621 #   attr {
12622 #     name: "T"
12623 #     type: "type"
12624 #     allowed_values {
12625 #       list {
12626 #         type: DT_BFLOAT16
12627 #         type: DT_HALF
12628 #         type: DT_FLOAT
12629 #         type: DT_DOUBLE
12630 #         type: DT_COMPLEX64
12631 #         type: DT_COMPLEX128
12632 #       }
12633 #     }
12634 #   }
12635 # }
12636 # op {
12637 #   name: "RsqrtGrad"
12638 #   input_arg {
12639 #     name: "y"
12640 #     type_attr: "T"
12641 #   }
12642 #   input_arg {
12643 #     name: "dy"
12644 #     type_attr: "T"
12645 #   }
12646 #   output_arg {
12647 #     name: "z"
12648 #     type_attr: "T"
12649 #   }
12650 #   attr {
12651 #     name: "T"
12652 #     type: "type"
12653 #     allowed_values {
12654 #       list {
12655 #         type: DT_BFLOAT16
12656 #         type: DT_HALF
12657 #         type: DT_FLOAT
12658 #         type: DT_DOUBLE
12659 #         type: DT_COMPLEX64
12660 #         type: DT_COMPLEX128
12661 #       }
12662 #     }
12663 #   }
12664 # }
12665 # op {
12666 #   name: "SegmentMax"
12667 #   input_arg {
12668 #     name: "data"
12669 #     type_attr: "T"
12670 #   }
12671 #   input_arg {
12672 #     name: "segment_ids"
12673 #     type_attr: "Tindices"
12674 #   }
12675 #   output_arg {
12676 #     name: "output"
12677 #     type_attr: "T"
12678 #   }
12679 #   attr {
12680 #     name: "T"
12681 #     type: "type"
12682 #     allowed_values {
12683 #       list {
12684 #         type: DT_FLOAT
12685 #         type: DT_DOUBLE
12686 #         type: DT_INT32
12687 #         type: DT_UINT8
12688 #         type: DT_INT16
12689 #         type: DT_INT8
12690 #         type: DT_INT64
12691 #         type: DT_BFLOAT16
12692 #         type: DT_UINT16
12693 #         type: DT_HALF
12694 #         type: DT_UINT32
12695 #         type: DT_UINT64
12696 #       }
12697 #     }
12698 #   }
12699 #   attr {
12700 #     name: "Tindices"
12701 #     type: "type"
12702 #     allowed_values {
12703 #       list {
12704 #         type: DT_INT32
12705 #         type: DT_INT64
12706 #       }
12707 #     }
12708 #   }
12709 # }
12710 # op {
12711 #   name: "SegmentMean"
12712 #   input_arg {
12713 #     name: "data"
12714 #     type_attr: "T"
12715 #   }
12716 #   input_arg {
12717 #     name: "segment_ids"
12718 #     type_attr: "Tindices"
12719 #   }
12720 #   output_arg {
12721 #     name: "output"
12722 #     type_attr: "T"
12723 #   }
12724 #   attr {
12725 #     name: "T"
12726 #     type: "type"
12727 #     allowed_values {
12728 #       list {
12729 #         type: DT_FLOAT
12730 #         type: DT_DOUBLE
12731 #         type: DT_INT32
12732 #         type: DT_UINT8
12733 #         type: DT_INT16
12734 #         type: DT_INT8
12735 #         type: DT_COMPLEX64
12736 #         type: DT_INT64
12737 #         type: DT_QINT8
12738 #         type: DT_QUINT8
12739 #         type: DT_QINT32
12740 #         type: DT_BFLOAT16
12741 #         type: DT_UINT16
12742 #         type: DT_COMPLEX128
12743 #         type: DT_HALF
12744 #         type: DT_UINT32
12745 #         type: DT_UINT64
12746 #       }
12747 #     }
12748 #   }
12749 #   attr {
12750 #     name: "Tindices"
12751 #     type: "type"
12752 #     allowed_values {
12753 #       list {
12754 #         type: DT_INT32
12755 #         type: DT_INT64
12756 #       }
12757 #     }
12758 #   }
12759 # }
12760 # op {
12761 #   name: "SegmentMin"
12762 #   input_arg {
12763 #     name: "data"
12764 #     type_attr: "T"
12765 #   }
12766 #   input_arg {
12767 #     name: "segment_ids"
12768 #     type_attr: "Tindices"
12769 #   }
12770 #   output_arg {
12771 #     name: "output"
12772 #     type_attr: "T"
12773 #   }
12774 #   attr {
12775 #     name: "T"
12776 #     type: "type"
12777 #     allowed_values {
12778 #       list {
12779 #         type: DT_FLOAT
12780 #         type: DT_DOUBLE
12781 #         type: DT_INT32
12782 #         type: DT_UINT8
12783 #         type: DT_INT16
12784 #         type: DT_INT8
12785 #         type: DT_INT64
12786 #         type: DT_BFLOAT16
12787 #         type: DT_UINT16
12788 #         type: DT_HALF
12789 #         type: DT_UINT32
12790 #         type: DT_UINT64
12791 #       }
12792 #     }
12793 #   }
12794 #   attr {
12795 #     name: "Tindices"
12796 #     type: "type"
12797 #     allowed_values {
12798 #       list {
12799 #         type: DT_INT32
12800 #         type: DT_INT64
12801 #       }
12802 #     }
12803 #   }
12804 # }
12805 # op {
12806 #   name: "SegmentProd"
12807 #   input_arg {
12808 #     name: "data"
12809 #     type_attr: "T"
12810 #   }
12811 #   input_arg {
12812 #     name: "segment_ids"
12813 #     type_attr: "Tindices"
12814 #   }
12815 #   output_arg {
12816 #     name: "output"
12817 #     type_attr: "T"
12818 #   }
12819 #   attr {
12820 #     name: "T"
12821 #     type: "type"
12822 #     allowed_values {
12823 #       list {
12824 #         type: DT_FLOAT
12825 #         type: DT_DOUBLE
12826 #         type: DT_INT32
12827 #         type: DT_UINT8
12828 #         type: DT_INT16
12829 #         type: DT_INT8
12830 #         type: DT_COMPLEX64
12831 #         type: DT_INT64
12832 #         type: DT_QINT8
12833 #         type: DT_QUINT8
12834 #         type: DT_QINT32
12835 #         type: DT_BFLOAT16
12836 #         type: DT_UINT16
12837 #         type: DT_COMPLEX128
12838 #         type: DT_HALF
12839 #         type: DT_UINT32
12840 #         type: DT_UINT64
12841 #       }
12842 #     }
12843 #   }
12844 #   attr {
12845 #     name: "Tindices"
12846 #     type: "type"
12847 #     allowed_values {
12848 #       list {
12849 #         type: DT_INT32
12850 #         type: DT_INT64
12851 #       }
12852 #     }
12853 #   }
12854 # }
12855 # op {
12856 #   name: "SegmentSum"
12857 #   input_arg {
12858 #     name: "data"
12859 #     type_attr: "T"
12860 #   }
12861 #   input_arg {
12862 #     name: "segment_ids"
12863 #     type_attr: "Tindices"
12864 #   }
12865 #   output_arg {
12866 #     name: "output"
12867 #     type_attr: "T"
12868 #   }
12869 #   attr {
12870 #     name: "T"
12871 #     type: "type"
12872 #     allowed_values {
12873 #       list {
12874 #         type: DT_FLOAT
12875 #         type: DT_DOUBLE
12876 #         type: DT_INT32
12877 #         type: DT_UINT8
12878 #         type: DT_INT16
12879 #         type: DT_INT8
12880 #         type: DT_COMPLEX64
12881 #         type: DT_INT64
12882 #         type: DT_QINT8
12883 #         type: DT_QUINT8
12884 #         type: DT_QINT32
12885 #         type: DT_BFLOAT16
12886 #         type: DT_UINT16
12887 #         type: DT_COMPLEX128
12888 #         type: DT_HALF
12889 #         type: DT_UINT32
12890 #         type: DT_UINT64
12891 #       }
12892 #     }
12893 #   }
12894 #   attr {
12895 #     name: "Tindices"
12896 #     type: "type"
12897 #     allowed_values {
12898 #       list {
12899 #         type: DT_INT32
12900 #         type: DT_INT64
12901 #       }
12902 #     }
12903 #   }
12904 # }
12905 # op {
12906 #   name: "Select"
12907 #   input_arg {
12908 #     name: "condition"
12909 #     type: DT_BOOL
12910 #   }
12911 #   input_arg {
12912 #     name: "t"
12913 #     type_attr: "T"
12914 #   }
12915 #   input_arg {
12916 #     name: "e"
12917 #     type_attr: "T"
12918 #   }
12919 #   output_arg {
12920 #     name: "output"
12921 #     type_attr: "T"
12922 #   }
12923 #   attr {
12924 #     name: "T"
12925 #     type: "type"
12926 #   }
12927 # }
12928 # op {
12929 #   name: "Sigmoid"
12930 #   input_arg {
12931 #     name: "x"
12932 #     type_attr: "T"
12933 #   }
12934 #   output_arg {
12935 #     name: "y"
12936 #     type_attr: "T"
12937 #   }
12938 #   attr {
12939 #     name: "T"
12940 #     type: "type"
12941 #     allowed_values {
12942 #       list {
12943 #         type: DT_BFLOAT16
12944 #         type: DT_HALF
12945 #         type: DT_FLOAT
12946 #         type: DT_DOUBLE
12947 #         type: DT_COMPLEX64
12948 #         type: DT_COMPLEX128
12949 #       }
12950 #     }
12951 #   }
12952 # }
12953 # op {
12954 #   name: "SigmoidGrad"
12955 #   input_arg {
12956 #     name: "y"
12957 #     type_attr: "T"
12958 #   }
12959 #   input_arg {
12960 #     name: "dy"
12961 #     type_attr: "T"
12962 #   }
12963 #   output_arg {
12964 #     name: "z"
12965 #     type_attr: "T"
12966 #   }
12967 #   attr {
12968 #     name: "T"
12969 #     type: "type"
12970 #     allowed_values {
12971 #       list {
12972 #         type: DT_BFLOAT16
12973 #         type: DT_HALF
12974 #         type: DT_FLOAT
12975 #         type: DT_DOUBLE
12976 #         type: DT_COMPLEX64
12977 #         type: DT_COMPLEX128
12978 #       }
12979 #     }
12980 #   }
12981 # }
12982 # op {
12983 #   name: "Sign"
12984 #   input_arg {
12985 #     name: "x"
12986 #     type_attr: "T"
12987 #   }
12988 #   output_arg {
12989 #     name: "y"
12990 #     type_attr: "T"
12991 #   }
12992 #   attr {
12993 #     name: "T"
12994 #     type: "type"
12995 #     allowed_values {
12996 #       list {
12997 #         type: DT_BFLOAT16
12998 #         type: DT_HALF
12999 #         type: DT_FLOAT
13000 #         type: DT_DOUBLE
13001 #         type: DT_INT32
13002 #         type: DT_INT64
13003 #         type: DT_COMPLEX64
13004 #         type: DT_COMPLEX128
13005 #       }
13006 #     }
13007 #   }
13008 # }
13009 # op {
13010 #   name: "Sin"
13011 #   input_arg {
13012 #     name: "x"
13013 #     type_attr: "T"
13014 #   }
13015 #   output_arg {
13016 #     name: "y"
13017 #     type_attr: "T"
13018 #   }
13019 #   attr {
13020 #     name: "T"
13021 #     type: "type"
13022 #     allowed_values {
13023 #       list {
13024 #         type: DT_BFLOAT16
13025 #         type: DT_HALF
13026 #         type: DT_FLOAT
13027 #         type: DT_DOUBLE
13028 #         type: DT_COMPLEX64
13029 #         type: DT_COMPLEX128
13030 #       }
13031 #     }
13032 #   }
13033 # }
13034 # op {
13035 #   name: "Sinh"
13036 #   input_arg {
13037 #     name: "x"
13038 #     type_attr: "T"
13039 #   }
13040 #   output_arg {
13041 #     name: "y"
13042 #     type_attr: "T"
13043 #   }
13044 #   attr {
13045 #     name: "T"
13046 #     type: "type"
13047 #     allowed_values {
13048 #       list {
13049 #         type: DT_BFLOAT16
13050 #         type: DT_HALF
13051 #         type: DT_FLOAT
13052 #         type: DT_DOUBLE
13053 #         type: DT_COMPLEX64
13054 #         type: DT_COMPLEX128
13055 #       }
13056 #     }
13057 #   }
13058 # }
13059 # op {
13060 #   name: "SparseMatMul"
13061 #   input_arg {
13062 #     name: "a"
13063 #     type_attr: "Ta"
13064 #   }
13065 #   input_arg {
13066 #     name: "b"
13067 #     type_attr: "Tb"
13068 #   }
13069 #   output_arg {
13070 #     name: "product"
13071 #     type: DT_FLOAT
13072 #   }
13073 #   attr {
13074 #     name: "transpose_a"
13075 #     type: "bool"
13076 #     default_value {
13077 #       b: false
13078 #     }
13079 #   }
13080 #   attr {
13081 #     name: "transpose_b"
13082 #     type: "bool"
13083 #     default_value {
13084 #       b: false
13085 #     }
13086 #   }
13087 #   attr {
13088 #     name: "a_is_sparse"
13089 #     type: "bool"
13090 #     default_value {
13091 #       b: false
13092 #     }
13093 #   }
13094 #   attr {
13095 #     name: "b_is_sparse"
13096 #     type: "bool"
13097 #     default_value {
13098 #       b: false
13099 #     }
13100 #   }
13101 #   attr {
13102 #     name: "Ta"
13103 #     type: "type"
13104 #     default_value {
13105 #       type: DT_FLOAT
13106 #     }
13107 #     allowed_values {
13108 #       list {
13109 #         type: DT_FLOAT
13110 #         type: DT_BFLOAT16
13111 #       }
13112 #     }
13113 #   }
13114 #   attr {
13115 #     name: "Tb"
13116 #     type: "type"
13117 #     default_value {
13118 #       type: DT_FLOAT
13119 #     }
13120 #     allowed_values {
13121 #       list {
13122 #         type: DT_FLOAT
13123 #         type: DT_BFLOAT16
13124 #       }
13125 #     }
13126 #   }
13127 # }
13128 # op {
13129 #   name: "SparseSegmentMean"
13130 #   input_arg {
13131 #     name: "data"
13132 #     type_attr: "T"
13133 #   }
13134 #   input_arg {
13135 #     name: "indices"
13136 #     type_attr: "Tidx"
13137 #   }
13138 #   input_arg {
13139 #     name: "segment_ids"
13140 #     type: DT_INT32
13141 #   }
13142 #   output_arg {
13143 #     name: "output"
13144 #     type_attr: "T"
13145 #   }
13146 #   attr {
13147 #     name: "T"
13148 #     type: "type"
13149 #     allowed_values {
13150 #       list {
13151 #         type: DT_FLOAT
13152 #         type: DT_DOUBLE
13153 #       }
13154 #     }
13155 #   }
13156 #   attr {
13157 #     name: "Tidx"
13158 #     type: "type"
13159 #     default_value {
13160 #       type: DT_INT32
13161 #     }
13162 #     allowed_values {
13163 #       list {
13164 #         type: DT_INT32
13165 #         type: DT_INT64
13166 #       }
13167 #     }
13168 #   }
13169 # }
13170 # op {
13171 #   name: "SparseSegmentMeanGrad"
13172 #   input_arg {
13173 #     name: "grad"
13174 #     type_attr: "T"
13175 #   }
13176 #   input_arg {
13177 #     name: "indices"
13178 #     type_attr: "Tidx"
13179 #   }
13180 #   input_arg {
13181 #     name: "segment_ids"
13182 #     type: DT_INT32
13183 #   }
13184 #   input_arg {
13185 #     name: "output_dim0"
13186 #     type: DT_INT32
13187 #   }
13188 #   output_arg {
13189 #     name: "output"
13190 #     type_attr: "T"
13191 #   }
13192 #   attr {
13193 #     name: "T"
13194 #     type: "type"
13195 #     allowed_values {
13196 #       list {
13197 #         type: DT_FLOAT
13198 #         type: DT_DOUBLE
13199 #       }
13200 #     }
13201 #   }
13202 #   attr {
13203 #     name: "Tidx"
13204 #     type: "type"
13205 #     default_value {
13206 #       type: DT_INT32
13207 #     }
13208 #     allowed_values {
13209 #       list {
13210 #         type: DT_INT32
13211 #         type: DT_INT64
13212 #       }
13213 #     }
13214 #   }
13215 # }
13216 # op {
13217 #   name: "SparseSegmentMeanWithNumSegments"
13218 #   input_arg {
13219 #     name: "data"
13220 #     type_attr: "T"
13221 #   }
13222 #   input_arg {
13223 #     name: "indices"
13224 #     type_attr: "Tidx"
13225 #   }
13226 #   input_arg {
13227 #     name: "segment_ids"
13228 #     type: DT_INT32
13229 #   }
13230 #   input_arg {
13231 #     name: "num_segments"
13232 #     type_attr: "Tnumsegments"
13233 #   }
13234 #   output_arg {
13235 #     name: "output"
13236 #     type_attr: "T"
13237 #   }
13238 #   attr {
13239 #     name: "T"
13240 #     type: "type"
13241 #     allowed_values {
13242 #       list {
13243 #         type: DT_FLOAT
13244 #         type: DT_DOUBLE
13245 #       }
13246 #     }
13247 #   }
13248 #   attr {
13249 #     name: "Tidx"
13250 #     type: "type"
13251 #     default_value {
13252 #       type: DT_INT32
13253 #     }
13254 #     allowed_values {
13255 #       list {
13256 #         type: DT_INT32
13257 #         type: DT_INT64
13258 #       }
13259 #     }
13260 #   }
13261 #   attr {
13262 #     name: "Tnumsegments"
13263 #     type: "type"
13264 #     default_value {
13265 #       type: DT_INT32
13266 #     }
13267 #     allowed_values {
13268 #       list {
13269 #         type: DT_INT32
13270 #         type: DT_INT64
13271 #       }
13272 #     }
13273 #   }
13274 # }
13275 # op {
13276 #   name: "SparseSegmentSqrtN"
13277 #   input_arg {
13278 #     name: "data"
13279 #     type_attr: "T"
13280 #   }
13281 #   input_arg {
13282 #     name: "indices"
13283 #     type_attr: "Tidx"
13284 #   }
13285 #   input_arg {
13286 #     name: "segment_ids"
13287 #     type: DT_INT32
13288 #   }
13289 #   output_arg {
13290 #     name: "output"
13291 #     type_attr: "T"
13292 #   }
13293 #   attr {
13294 #     name: "T"
13295 #     type: "type"
13296 #     allowed_values {
13297 #       list {
13298 #         type: DT_FLOAT
13299 #         type: DT_DOUBLE
13300 #       }
13301 #     }
13302 #   }
13303 #   attr {
13304 #     name: "Tidx"
13305 #     type: "type"
13306 #     default_value {
13307 #       type: DT_INT32
13308 #     }
13309 #     allowed_values {
13310 #       list {
13311 #         type: DT_INT32
13312 #         type: DT_INT64
13313 #       }
13314 #     }
13315 #   }
13316 # }
13317 # op {
13318 #   name: "SparseSegmentSqrtNGrad"
13319 #   input_arg {
13320 #     name: "grad"
13321 #     type_attr: "T"
13322 #   }
13323 #   input_arg {
13324 #     name: "indices"
13325 #     type_attr: "Tidx"
13326 #   }
13327 #   input_arg {
13328 #     name: "segment_ids"
13329 #     type: DT_INT32
13330 #   }
13331 #   input_arg {
13332 #     name: "output_dim0"
13333 #     type: DT_INT32
13334 #   }
13335 #   output_arg {
13336 #     name: "output"
13337 #     type_attr: "T"
13338 #   }
13339 #   attr {
13340 #     name: "T"
13341 #     type: "type"
13342 #     allowed_values {
13343 #       list {
13344 #         type: DT_FLOAT
13345 #         type: DT_DOUBLE
13346 #       }
13347 #     }
13348 #   }
13349 #   attr {
13350 #     name: "Tidx"
13351 #     type: "type"
13352 #     default_value {
13353 #       type: DT_INT32
13354 #     }
13355 #     allowed_values {
13356 #       list {
13357 #         type: DT_INT32
13358 #         type: DT_INT64
13359 #       }
13360 #     }
13361 #   }
13362 # }
13363 # op {
13364 #   name: "SparseSegmentSqrtNWithNumSegments"
13365 #   input_arg {
13366 #     name: "data"
13367 #     type_attr: "T"
13368 #   }
13369 #   input_arg {
13370 #     name: "indices"
13371 #     type_attr: "Tidx"
13372 #   }
13373 #   input_arg {
13374 #     name: "segment_ids"
13375 #     type: DT_INT32
13376 #   }
13377 #   input_arg {
13378 #     name: "num_segments"
13379 #     type_attr: "Tnumsegments"
13380 #   }
13381 #   output_arg {
13382 #     name: "output"
13383 #     type_attr: "T"
13384 #   }
13385 #   attr {
13386 #     name: "T"
13387 #     type: "type"
13388 #     allowed_values {
13389 #       list {
13390 #         type: DT_FLOAT
13391 #         type: DT_DOUBLE
13392 #       }
13393 #     }
13394 #   }
13395 #   attr {
13396 #     name: "Tidx"
13397 #     type: "type"
13398 #     default_value {
13399 #       type: DT_INT32
13400 #     }
13401 #     allowed_values {
13402 #       list {
13403 #         type: DT_INT32
13404 #         type: DT_INT64
13405 #       }
13406 #     }
13407 #   }
13408 #   attr {
13409 #     name: "Tnumsegments"
13410 #     type: "type"
13411 #     default_value {
13412 #       type: DT_INT32
13413 #     }
13414 #     allowed_values {
13415 #       list {
13416 #         type: DT_INT32
13417 #         type: DT_INT64
13418 #       }
13419 #     }
13420 #   }
13421 # }
13422 # op {
13423 #   name: "SparseSegmentSum"
13424 #   input_arg {
13425 #     name: "data"
13426 #     type_attr: "T"
13427 #   }
13428 #   input_arg {
13429 #     name: "indices"
13430 #     type_attr: "Tidx"
13431 #   }
13432 #   input_arg {
13433 #     name: "segment_ids"
13434 #     type: DT_INT32
13435 #   }
13436 #   output_arg {
13437 #     name: "output"
13438 #     type_attr: "T"
13439 #   }
13440 #   attr {
13441 #     name: "T"
13442 #     type: "type"
13443 #     allowed_values {
13444 #       list {
13445 #         type: DT_FLOAT
13446 #         type: DT_DOUBLE
13447 #         type: DT_INT32
13448 #         type: DT_UINT8
13449 #         type: DT_INT16
13450 #         type: DT_INT8
13451 #         type: DT_INT64
13452 #         type: DT_BFLOAT16
13453 #         type: DT_UINT16
13454 #         type: DT_HALF
13455 #         type: DT_UINT32
13456 #         type: DT_UINT64
13457 #       }
13458 #     }
13459 #   }
13460 #   attr {
13461 #     name: "Tidx"
13462 #     type: "type"
13463 #     default_value {
13464 #       type: DT_INT32
13465 #     }
13466 #     allowed_values {
13467 #       list {
13468 #         type: DT_INT32
13469 #         type: DT_INT64
13470 #       }
13471 #     }
13472 #   }
13473 # }
13474 # op {
13475 #   name: "SparseSegmentSumWithNumSegments"
13476 #   input_arg {
13477 #     name: "data"
13478 #     type_attr: "T"
13479 #   }
13480 #   input_arg {
13481 #     name: "indices"
13482 #     type_attr: "Tidx"
13483 #   }
13484 #   input_arg {
13485 #     name: "segment_ids"
13486 #     type: DT_INT32
13487 #   }
13488 #   input_arg {
13489 #     name: "num_segments"
13490 #     type_attr: "Tnumsegments"
13491 #   }
13492 #   output_arg {
13493 #     name: "output"
13494 #     type_attr: "T"
13495 #   }
13496 #   attr {
13497 #     name: "T"
13498 #     type: "type"
13499 #     allowed_values {
13500 #       list {
13501 #         type: DT_FLOAT
13502 #         type: DT_DOUBLE
13503 #         type: DT_INT32
13504 #         type: DT_UINT8
13505 #         type: DT_INT16
13506 #         type: DT_INT8
13507 #         type: DT_INT64
13508 #         type: DT_BFLOAT16
13509 #         type: DT_UINT16
13510 #         type: DT_HALF
13511 #         type: DT_UINT32
13512 #         type: DT_UINT64
13513 #       }
13514 #     }
13515 #   }
13516 #   attr {
13517 #     name: "Tidx"
13518 #     type: "type"
13519 #     default_value {
13520 #       type: DT_INT32
13521 #     }
13522 #     allowed_values {
13523 #       list {
13524 #         type: DT_INT32
13525 #         type: DT_INT64
13526 #       }
13527 #     }
13528 #   }
13529 #   attr {
13530 #     name: "Tnumsegments"
13531 #     type: "type"
13532 #     default_value {
13533 #       type: DT_INT32
13534 #     }
13535 #     allowed_values {
13536 #       list {
13537 #         type: DT_INT32
13538 #         type: DT_INT64
13539 #       }
13540 #     }
13541 #   }
13542 # }
13543 # op {
13544 #   name: "Sqrt"
13545 #   input_arg {
13546 #     name: "x"
13547 #     type_attr: "T"
13548 #   }
13549 #   output_arg {
13550 #     name: "y"
13551 #     type_attr: "T"
13552 #   }
13553 #   attr {
13554 #     name: "T"
13555 #     type: "type"
13556 #     allowed_values {
13557 #       list {
13558 #         type: DT_BFLOAT16
13559 #         type: DT_HALF
13560 #         type: DT_FLOAT
13561 #         type: DT_DOUBLE
13562 #         type: DT_COMPLEX64
13563 #         type: DT_COMPLEX128
13564 #       }
13565 #     }
13566 #   }
13567 # }
13568 # op {
13569 #   name: "SqrtGrad"
13570 #   input_arg {
13571 #     name: "y"
13572 #     type_attr: "T"
13573 #   }
13574 #   input_arg {
13575 #     name: "dy"
13576 #     type_attr: "T"
13577 #   }
13578 #   output_arg {
13579 #     name: "z"
13580 #     type_attr: "T"
13581 #   }
13582 #   attr {
13583 #     name: "T"
13584 #     type: "type"
13585 #     allowed_values {
13586 #       list {
13587 #         type: DT_BFLOAT16
13588 #         type: DT_HALF
13589 #         type: DT_FLOAT
13590 #         type: DT_DOUBLE
13591 #         type: DT_COMPLEX64
13592 #         type: DT_COMPLEX128
13593 #       }
13594 #     }
13595 #   }
13596 # }
13597 # op {
13598 #   name: "Square"
13599 #   input_arg {
13600 #     name: "x"
13601 #     type_attr: "T"
13602 #   }
13603 #   output_arg {
13604 #     name: "y"
13605 #     type_attr: "T"
13606 #   }
13607 #   attr {
13608 #     name: "T"
13609 #     type: "type"
13610 #     allowed_values {
13611 #       list {
13612 #         type: DT_BFLOAT16
13613 #         type: DT_HALF
13614 #         type: DT_FLOAT
13615 #         type: DT_DOUBLE
13616 #         type: DT_INT32
13617 #         type: DT_INT64
13618 #         type: DT_COMPLEX64
13619 #         type: DT_COMPLEX128
13620 #       }
13621 #     }
13622 #   }
13623 # }
13624 # op {
13625 #   name: "SquaredDifference"
13626 #   input_arg {
13627 #     name: "x"
13628 #     type_attr: "T"
13629 #   }
13630 #   input_arg {
13631 #     name: "y"
13632 #     type_attr: "T"
13633 #   }
13634 #   output_arg {
13635 #     name: "z"
13636 #     type_attr: "T"
13637 #   }
13638 #   attr {
13639 #     name: "T"
13640 #     type: "type"
13641 #     allowed_values {
13642 #       list {
13643 #         type: DT_BFLOAT16
13644 #         type: DT_HALF
13645 #         type: DT_FLOAT
13646 #         type: DT_DOUBLE
13647 #         type: DT_INT32
13648 #         type: DT_INT64
13649 #         type: DT_COMPLEX64
13650 #         type: DT_COMPLEX128
13651 #       }
13652 #     }
13653 #   }
13654 #   is_commutative: true
13655 # }
13656 # op {
13657 #   name: "Sub"
13658 #   input_arg {
13659 #     name: "x"
13660 #     type_attr: "T"
13661 #   }
13662 #   input_arg {
13663 #     name: "y"
13664 #     type_attr: "T"
13665 #   }
13666 #   output_arg {
13667 #     name: "z"
13668 #     type_attr: "T"
13669 #   }
13670 #   attr {
13671 #     name: "T"
13672 #     type: "type"
13673 #     allowed_values {
13674 #       list {
13675 #         type: DT_BFLOAT16
13676 #         type: DT_HALF
13677 #         type: DT_FLOAT
13678 #         type: DT_DOUBLE
13679 #         type: DT_UINT8
13680 #         type: DT_INT8
13681 #         type: DT_UINT16
13682 #         type: DT_INT16
13683 #         type: DT_INT32
13684 #         type: DT_INT64
13685 #         type: DT_COMPLEX64
13686 #         type: DT_COMPLEX128
13687 #       }
13688 #     }
13689 #   }
13690 # }
13691 # op {
13692 #   name: "Sum"
13693 #   input_arg {
13694 #     name: "input"
13695 #     type_attr: "T"
13696 #   }
13697 #   input_arg {
13698 #     name: "reduction_indices"
13699 #     type_attr: "Tidx"
13700 #   }
13701 #   output_arg {
13702 #     name: "output"
13703 #     type_attr: "T"
13704 #   }
13705 #   attr {
13706 #     name: "keep_dims"
13707 #     type: "bool"
13708 #     default_value {
13709 #       b: false
13710 #     }
13711 #   }
13712 #   attr {
13713 #     name: "T"
13714 #     type: "type"
13715 #     allowed_values {
13716 #       list {
13717 #         type: DT_FLOAT
13718 #         type: DT_DOUBLE
13719 #         type: DT_INT32
13720 #         type: DT_UINT8
13721 #         type: DT_INT16
13722 #         type: DT_INT8
13723 #         type: DT_COMPLEX64
13724 #         type: DT_INT64
13725 #         type: DT_QINT8
13726 #         type: DT_QUINT8
13727 #         type: DT_QINT32
13728 #         type: DT_BFLOAT16
13729 #         type: DT_UINT16
13730 #         type: DT_COMPLEX128
13731 #         type: DT_HALF
13732 #         type: DT_UINT32
13733 #         type: DT_UINT64
13734 #       }
13735 #     }
13736 #   }
13737 #   attr {
13738 #     name: "Tidx"
13739 #     type: "type"
13740 #     default_value {
13741 #       type: DT_INT32
13742 #     }
13743 #     allowed_values {
13744 #       list {
13745 #         type: DT_INT32
13746 #         type: DT_INT64
13747 #       }
13748 #     }
13749 #   }
13750 # }
13751 # op {
13752 #   name: "Tan"
13753 #   input_arg {
13754 #     name: "x"
13755 #     type_attr: "T"
13756 #   }
13757 #   output_arg {
13758 #     name: "y"
13759 #     type_attr: "T"
13760 #   }
13761 #   attr {
13762 #     name: "T"
13763 #     type: "type"
13764 #     allowed_values {
13765 #       list {
13766 #         type: DT_BFLOAT16
13767 #         type: DT_HALF
13768 #         type: DT_FLOAT
13769 #         type: DT_DOUBLE
13770 #         type: DT_INT32
13771 #         type: DT_INT64
13772 #         type: DT_COMPLEX64
13773 #         type: DT_COMPLEX128
13774 #       }
13775 #     }
13776 #   }
13777 # }
13778 # op {
13779 #   name: "Tanh"
13780 #   input_arg {
13781 #     name: "x"
13782 #     type_attr: "T"
13783 #   }
13784 #   output_arg {
13785 #     name: "y"
13786 #     type_attr: "T"
13787 #   }
13788 #   attr {
13789 #     name: "T"
13790 #     type: "type"
13791 #     allowed_values {
13792 #       list {
13793 #         type: DT_BFLOAT16
13794 #         type: DT_HALF
13795 #         type: DT_FLOAT
13796 #         type: DT_DOUBLE
13797 #         type: DT_COMPLEX64
13798 #         type: DT_COMPLEX128
13799 #       }
13800 #     }
13801 #   }
13802 # }
13803 # op {
13804 #   name: "TanhGrad"
13805 #   input_arg {
13806 #     name: "y"
13807 #     type_attr: "T"
13808 #   }
13809 #   input_arg {
13810 #     name: "dy"
13811 #     type_attr: "T"
13812 #   }
13813 #   output_arg {
13814 #     name: "z"
13815 #     type_attr: "T"
13816 #   }
13817 #   attr {
13818 #     name: "T"
13819 #     type: "type"
13820 #     allowed_values {
13821 #       list {
13822 #         type: DT_BFLOAT16
13823 #         type: DT_HALF
13824 #         type: DT_FLOAT
13825 #         type: DT_DOUBLE
13826 #         type: DT_COMPLEX64
13827 #         type: DT_COMPLEX128
13828 #       }
13829 #     }
13830 #   }
13831 # }
13832 # op {
13833 #   name: "TruncateDiv"
13834 #   input_arg {
13835 #     name: "x"
13836 #     type_attr: "T"
13837 #   }
13838 #   input_arg {
13839 #     name: "y"
13840 #     type_attr: "T"
13841 #   }
13842 #   output_arg {
13843 #     name: "z"
13844 #     type_attr: "T"
13845 #   }
13846 #   attr {
13847 #     name: "T"
13848 #     type: "type"
13849 #     allowed_values {
13850 #       list {
13851 #         type: DT_BFLOAT16
13852 #         type: DT_HALF
13853 #         type: DT_FLOAT
13854 #         type: DT_DOUBLE
13855 #         type: DT_UINT8
13856 #         type: DT_INT8
13857 #         type: DT_UINT16
13858 #         type: DT_INT16
13859 #         type: DT_INT32
13860 #         type: DT_INT64
13861 #         type: DT_COMPLEX64
13862 #         type: DT_COMPLEX128
13863 #       }
13864 #     }
13865 #   }
13866 # }
13867 # op {
13868 #   name: "TruncateMod"
13869 #   input_arg {
13870 #     name: "x"
13871 #     type_attr: "T"
13872 #   }
13873 #   input_arg {
13874 #     name: "y"
13875 #     type_attr: "T"
13876 #   }
13877 #   output_arg {
13878 #     name: "z"
13879 #     type_attr: "T"
13880 #   }
13881 #   attr {
13882 #     name: "T"
13883 #     type: "type"
13884 #     allowed_values {
13885 #       list {
13886 #         type: DT_INT32
13887 #         type: DT_INT64
13888 #         type: DT_BFLOAT16
13889 #         type: DT_HALF
13890 #         type: DT_FLOAT
13891 #         type: DT_DOUBLE
13892 #       }
13893 #     }
13894 #   }
13895 # }
13896 # op {
13897 #   name: "UnsortedSegmentMax"
13898 #   input_arg {
13899 #     name: "data"
13900 #     type_attr: "T"
13901 #   }
13902 #   input_arg {
13903 #     name: "segment_ids"
13904 #     type_attr: "Tindices"
13905 #   }
13906 #   input_arg {
13907 #     name: "num_segments"
13908 #     type_attr: "Tnumsegments"
13909 #   }
13910 #   output_arg {
13911 #     name: "output"
13912 #     type_attr: "T"
13913 #   }
13914 #   attr {
13915 #     name: "T"
13916 #     type: "type"
13917 #     allowed_values {
13918 #       list {
13919 #         type: DT_FLOAT
13920 #         type: DT_DOUBLE
13921 #         type: DT_INT32
13922 #         type: DT_UINT8
13923 #         type: DT_INT16
13924 #         type: DT_INT8
13925 #         type: DT_INT64
13926 #         type: DT_BFLOAT16
13927 #         type: DT_UINT16
13928 #         type: DT_HALF
13929 #         type: DT_UINT32
13930 #         type: DT_UINT64
13931 #       }
13932 #     }
13933 #   }
13934 #   attr {
13935 #     name: "Tindices"
13936 #     type: "type"
13937 #     allowed_values {
13938 #       list {
13939 #         type: DT_INT32
13940 #         type: DT_INT64
13941 #       }
13942 #     }
13943 #   }
13944 #   attr {
13945 #     name: "Tnumsegments"
13946 #     type: "type"
13947 #     default_value {
13948 #       type: DT_INT32
13949 #     }
13950 #     allowed_values {
13951 #       list {
13952 #         type: DT_INT32
13953 #         type: DT_INT64
13954 #       }
13955 #     }
13956 #   }
13957 # }
13958 # op {
13959 #   name: "UnsortedSegmentMin"
13960 #   input_arg {
13961 #     name: "data"
13962 #     type_attr: "T"
13963 #   }
13964 #   input_arg {
13965 #     name: "segment_ids"
13966 #     type_attr: "Tindices"
13967 #   }
13968 #   input_arg {
13969 #     name: "num_segments"
13970 #     type_attr: "Tnumsegments"
13971 #   }
13972 #   output_arg {
13973 #     name: "output"
13974 #     type_attr: "T"
13975 #   }
13976 #   attr {
13977 #     name: "T"
13978 #     type: "type"
13979 #     allowed_values {
13980 #       list {
13981 #         type: DT_FLOAT
13982 #         type: DT_DOUBLE
13983 #         type: DT_INT32
13984 #         type: DT_UINT8
13985 #         type: DT_INT16
13986 #         type: DT_INT8
13987 #         type: DT_INT64
13988 #         type: DT_BFLOAT16
13989 #         type: DT_UINT16
13990 #         type: DT_HALF
13991 #         type: DT_UINT32
13992 #         type: DT_UINT64
13993 #       }
13994 #     }
13995 #   }
13996 #   attr {
13997 #     name: "Tindices"
13998 #     type: "type"
13999 #     allowed_values {
14000 #       list {
14001 #         type: DT_INT32
14002 #         type: DT_INT64
14003 #       }
14004 #     }
14005 #   }
14006 #   attr {
14007 #     name: "Tnumsegments"
14008 #     type: "type"
14009 #     default_value {
14010 #       type: DT_INT32
14011 #     }
14012 #     allowed_values {
14013 #       list {
14014 #         type: DT_INT32
14015 #         type: DT_INT64
14016 #       }
14017 #     }
14018 #   }
14019 # }
14020 # op {
14021 #   name: "UnsortedSegmentProd"
14022 #   input_arg {
14023 #     name: "data"
14024 #     type_attr: "T"
14025 #   }
14026 #   input_arg {
14027 #     name: "segment_ids"
14028 #     type_attr: "Tindices"
14029 #   }
14030 #   input_arg {
14031 #     name: "num_segments"
14032 #     type_attr: "Tnumsegments"
14033 #   }
14034 #   output_arg {
14035 #     name: "output"
14036 #     type_attr: "T"
14037 #   }
14038 #   attr {
14039 #     name: "T"
14040 #     type: "type"
14041 #     allowed_values {
14042 #       list {
14043 #         type: DT_FLOAT
14044 #         type: DT_DOUBLE
14045 #         type: DT_INT32
14046 #         type: DT_UINT8
14047 #         type: DT_INT16
14048 #         type: DT_INT8
14049 #         type: DT_COMPLEX64
14050 #         type: DT_INT64
14051 #         type: DT_QINT8
14052 #         type: DT_QUINT8
14053 #         type: DT_QINT32
14054 #         type: DT_BFLOAT16
14055 #         type: DT_UINT16
14056 #         type: DT_COMPLEX128
14057 #         type: DT_HALF
14058 #         type: DT_UINT32
14059 #         type: DT_UINT64
14060 #       }
14061 #     }
14062 #   }
14063 #   attr {
14064 #     name: "Tindices"
14065 #     type: "type"
14066 #     allowed_values {
14067 #       list {
14068 #         type: DT_INT32
14069 #         type: DT_INT64
14070 #       }
14071 #     }
14072 #   }
14073 #   attr {
14074 #     name: "Tnumsegments"
14075 #     type: "type"
14076 #     default_value {
14077 #       type: DT_INT32
14078 #     }
14079 #     allowed_values {
14080 #       list {
14081 #         type: DT_INT32
14082 #         type: DT_INT64
14083 #       }
14084 #     }
14085 #   }
14086 # }
14087 # op {
14088 #   name: "UnsortedSegmentSum"
14089 #   input_arg {
14090 #     name: "data"
14091 #     type_attr: "T"
14092 #   }
14093 #   input_arg {
14094 #     name: "segment_ids"
14095 #     type_attr: "Tindices"
14096 #   }
14097 #   input_arg {
14098 #     name: "num_segments"
14099 #     type_attr: "Tnumsegments"
14100 #   }
14101 #   output_arg {
14102 #     name: "output"
14103 #     type_attr: "T"
14104 #   }
14105 #   attr {
14106 #     name: "T"
14107 #     type: "type"
14108 #     allowed_values {
14109 #       list {
14110 #         type: DT_FLOAT
14111 #         type: DT_DOUBLE
14112 #         type: DT_INT32
14113 #         type: DT_UINT8
14114 #         type: DT_INT16
14115 #         type: DT_INT8
14116 #         type: DT_COMPLEX64
14117 #         type: DT_INT64
14118 #         type: DT_QINT8
14119 #         type: DT_QUINT8
14120 #         type: DT_QINT32
14121 #         type: DT_BFLOAT16
14122 #         type: DT_UINT16
14123 #         type: DT_COMPLEX128
14124 #         type: DT_HALF
14125 #         type: DT_UINT32
14126 #         type: DT_UINT64
14127 #       }
14128 #     }
14129 #   }
14130 #   attr {
14131 #     name: "Tindices"
14132 #     type: "type"
14133 #     allowed_values {
14134 #       list {
14135 #         type: DT_INT32
14136 #         type: DT_INT64
14137 #       }
14138 #     }
14139 #   }
14140 #   attr {
14141 #     name: "Tnumsegments"
14142 #     type: "type"
14143 #     default_value {
14144 #       type: DT_INT32
14145 #     }
14146 #     allowed_values {
14147 #       list {
14148 #         type: DT_INT32
14149 #         type: DT_INT64
14150 #       }
14151 #     }
14152 #   }
14153 # }
14154 # op {
14155 #   name: "Xdivy"
14156 #   input_arg {
14157 #     name: "x"
14158 #     type_attr: "T"
14159 #   }
14160 #   input_arg {
14161 #     name: "y"
14162 #     type_attr: "T"
14163 #   }
14164 #   output_arg {
14165 #     name: "z"
14166 #     type_attr: "T"
14167 #   }
14168 #   attr {
14169 #     name: "T"
14170 #     type: "type"
14171 #     allowed_values {
14172 #       list {
14173 #         type: DT_HALF
14174 #         type: DT_FLOAT
14175 #         type: DT_DOUBLE
14176 #         type: DT_COMPLEX64
14177 #         type: DT_COMPLEX128
14178 #       }
14179 #     }
14180 #   }
14181 # }
14182 # op {
14183 #   name: "Xlogy"
14184 #   input_arg {
14185 #     name: "x"
14186 #     type_attr: "T"
14187 #   }
14188 #   input_arg {
14189 #     name: "y"
14190 #     type_attr: "T"
14191 #   }
14192 #   output_arg {
14193 #     name: "z"
14194 #     type_attr: "T"
14195 #   }
14196 #   attr {
14197 #     name: "T"
14198 #     type: "type"
14199 #     allowed_values {
14200 #       list {
14201 #         type: DT_HALF
14202 #         type: DT_FLOAT
14203 #         type: DT_DOUBLE
14204 #         type: DT_COMPLEX64
14205 #         type: DT_COMPLEX128
14206 #       }
14207 #     }
14208 #   }
14209 # }
14210 # op {
14211 #   name: "Zeta"
14212 #   input_arg {
14213 #     name: "x"
14214 #     type_attr: "T"
14215 #   }
14216 #   input_arg {
14217 #     name: "q"
14218 #     type_attr: "T"
14219 #   }
14220 #   output_arg {
14221 #     name: "z"
14222 #     type_attr: "T"
14223 #   }
14224 #   attr {
14225 #     name: "T"
14226 #     type: "type"
14227 #     allowed_values {
14228 #       list {
14229 #         type: DT_FLOAT
14230 #         type: DT_DOUBLE
14231 #       }
14232 #     }
14233 #   }
14234 # }
14235 _op_def_lib = _InitOpDefLibrary(b"\n,\n\003Abs\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\003\t\no\n\rAccumulateNV2\022\016\n\006inputs\"\001T*\001N\032\010\n\003sum\"\001T\"\014\n\001N\022\003int(\0010\001\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\016\n\005shape\022\005shape\200\001\001\220\001\001\n/\n\004Acos\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n.\n\005Acosh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n:\n\003Add\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\005\003\t\010\022\007\nW\n\004AddN\022\016\n\006inputs\"\001T*\001N\032\010\n\003sum\"\001T\"\014\n\001N\022\003int(\0010\001\"!\n\001T\022\004type:\026\n\0242\022\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\025\200\001\001\220\001\001\nA\n\005AddV2\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\032\n\001T\022\004type:\017\n\r2\013\016\023\001\002\004\006\005\003\t\010\022\200\001\001\220\001\001\nh\n\003All\022\t\n\005input\030\n\022\031\n\021reduction_indices\"\004Tidx\032\n\n\006output\030\n\"\025\n\tkeep_dims\022\004bool\032\002(\000\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\nT\n\005Angle\022\n\n\005input\"\001T\032\016\n\006output\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\nh\n\003Any\022\t\n\005input\030\n\022\031\n\021reduction_indices\"\004Tidx\032\n\n\006output\030\n\"\025\n\tkeep_dims\022\004bool\032\002(\000\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\ni\n\020ApproximateEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\031\n\ttolerance\022\005float\032\005%\254\305\'7\220\001\001\n\233\001\n\006ArgMax\022\n\n\005input\"\001T\022\021\n\tdimension\"\004Tidx\032\025\n\006output\"\013output_type\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\"\037\n\013output_type\022\004type\032\0020\t:\006\n\0042\002\003\t\n\233\001\n\006ArgMin\022\n\n\005input\"\001T\022\021\n\tdimension\"\004Tidx\032\025\n\006output\"\013output_type\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\"\037\n\013output_type\022\004type\032\0020\t:\006\n\0042\002\003\t\n/\n\004Asin\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n.\n\005Asinh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n/\n\004Atan\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n4\n\005Atan2\022\006\n\001y\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n.\n\005Atanh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\nh\n\013BatchMatMul\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\013\n\006output\"\001T\"\026\n\001T\022\004type:\013\n\t2\007\016\023\001\002\003\010\022\"\021\n\005adj_x\022\004bool\032\002(\000\"\021\n\005adj_y\022\004bool\032\002(\000\n0\n\tBesselI0e\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n0\n\tBesselI1e\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n<\n\007Betainc\022\006\n\001a\"\00
1T\022\006\n\001b\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\nK\n\010Bincount\022\007\n\003arr\030\003\022\010\n\004size\030\003\022\014\n\007weights\"\001T\032\t\n\004bins\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\003\t\001\002\nS\n\tBucketize\022\n\n\005input\"\001T\032\n\n\006output\030\003\"\023\n\001T\022\004type:\010\n\0062\004\003\t\001\002\"\031\n\nboundaries\022\013list(float)\nN\n\004Cast\022\t\n\001x\"\004SrcT\032\t\n\001y\"\004DstT\"\014\n\004SrcT\022\004type\"\014\n\004DstT\022\004type\"\024\n\010Truncate\022\004bool\032\002(\000\n+\n\004Ceil\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\nn\n\013ClipByValue\022\006\n\001t\"\001T\022\023\n\016clip_value_min\"\001T\022\023\n\016clip_value_max\"\001T\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\nT\n\021CompareAndBitpack\022\n\n\005input\"\001T\022\016\n\tthreshold\"\001T\032\n\n\006output\030\004\"\027\n\001T\022\004type:\014\n\n2\010\n\023\001\002\006\005\003\t\n]\n\007Complex\022\t\n\004real\"\001T\022\t\n\004imag\"\001T\032\013\n\003out\"\004Tout\"\025\n\001T\022\004type\032\0020\001:\006\n\0042\002\001\002\"\030\n\004Tout\022\004type\032\0020\010:\006\n\0042\002\010\022\nP\n\nComplexAbs\022\006\n\001x\"\001T\032\t\n\001y\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\n7\n\004Conj\022\n\n\005input\"\001T\032\013\n\006output\"\001T\"\026\n\001T\022\004type\032\0020\010:\007\n\0052\003\010\022\025\n,\n\003Cos\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n-\n\004Cosh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\nB\n\005Cross\022\006\n\001a\"\001T\022\006\n\001b\"\001T\032\014\n\007product\"\001T\"\
033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n\221\001\n\007Cumprod\022\006\n\001x\"\001T\022\014\n\004axis\"\004Tidx\032\010\n\003out\"\001T\"\025\n\texclusive\022\004bool\032\002(\000\"\023\n\007reverse\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\220\001\n\006Cumsum\022\006\n\001x\"\001T\022\014\n\004axis\"\004Tidx\032\010\n\003out\"\001T\"\025\n\texclusive\022\004bool\032\002(\000\"\023\n\007reverse\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n.\n\007Digamma\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n:\n\003Div\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n5\n\010DivNoNan\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\nB\n\005Equal\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\037\n\001T\022\004type:\024\n\0222\020\016\023\001\002\004\006\005\003\t\010\014\013\r\007\n\022\220\001\001\n*\n\003Erf\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n+\n\004Erfc\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n,\n\003Exp\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n.\n\005Expm1\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n,\n\005Floor\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n?\n\010FloorDiv\022\006\n\001
x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n9\n\010FloorMod\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\003\t\016\023\001\002\n=\n\007Greater\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\nB\n\014GreaterEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n}\n\023HistogramFixedWidth\022\013\n\006values\"\001T\022\020\n\013value_range\"\001T\022\t\n\005nbins\030\003\032\014\n\003out\"\005dtype\"\023\n\001T\022\004type:\010\n\0062\004\003\t\001\002\"\031\n\005dtype\022\004type\032\0020\003:\006\n\0042\002\003\t\n3\n\006Igamma\022\006\n\001a\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\n8\n\013IgammaGradA\022\006\n\001a\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\n4\n\007Igammac\022\006\n\001a\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\nS\n\004Imag\022\n\n\005input\"\001T\032\016\n\006output\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\n.\n\003Inv\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n9\n\007InvGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n.\n\010IsFinite\022\006\n\001x\"\001T\032\005\n\001y\030\n\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n+\n\005IsInf\022\006\n\001x\"\001T\032\005\n\001y\030\n\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n+\n\005IsNan\022\006\n\
001x\"\001T\032\005\n\001y\030\n\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n:\n\004Less\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n?\n\tLessEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\n-\n\006Lgamma\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\ni\n\010LinSpace\022\n\n\005start\"\001T\022\t\n\004stop\"\001T\022\013\n\003num\"\004Tidx\032\013\n\006output\"\001T\"\022\n\001T\022\004type:\007\n\0052\003\016\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n,\n\003Log\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n.\n\005Log1p\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n$\n\nLogicalAnd\022\005\n\001x\030\n\022\005\n\001y\030\n\032\005\n\001z\030\n\220\001\001\n\032\n\nLogicalNot\022\005\n\001x\030\n\032\005\n\001y\030\n\n#\n\tLogicalOr\022\005\n\001x\030\n\022\005\n\001y\030\n\032\005\n\001z\030\n\220\001\001\np\n\006MatMul\022\006\n\001a\"\001T\022\006\n\001b\"\001T\032\014\n\007product\"\001T\"\027\n\013transpose_a\022\004bool\032\002(\000\"\027\n\013transpose_b\022\004bool\032\002(\000\"\026\n\001T\022\004type:\013\n\t2\007\016\023\001\002\003\010\022\n\214\001\n\003Max\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n;\n\007Maximum\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\003\t\220\001\001\n\215\001\n\004Mean\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\214\001\n\003Min\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n;\n\007Minimum\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\003\t\220\001\001\n5\n\003Mod\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\026\n\001T\022\004type:\013\n\t2\007\003\t\023\023\016\001\002\n=\n\003Mul\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\220\001\001\n.\n\003Neg\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\nE\n\010NotEqual\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\005\n\001z\030\n\"\037\n\001T\022\004type:\024\n\0222\020\016\023\001\002\004\006\005\003\t\010\014\013\r\007\n\022\220\001\001\n6\n\tPolygamma\022\006\n\001a\"\001T\022\006\n\001x\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\n6\n\003Pow\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\
016\001\023\002\003\t\010\022\n\215\001\n\004Prod\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\267\001\n\032QuantizeDownAndShrinkRange\022\017\n\005input\"\006Tinput\022\r\n\tinput_min\030\001\022\r\n\tinput_max\030\001\032\022\n\006output\"\010out_type\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\033\n\010out_type\022\004type:\t\n\0072\005\013\014\r\017\020\n\301\001\n\014QuantizedAdd\022\007\n\001x\"\002T1\022\007\n\001y\"\002T2\022\t\n\005min_x\030\001\022\t\n\005max_x\030\001\022\t\n\005min_y\030\001\022\t\n\005max_y\030\001\032\014\n\001z\"\007Toutput\032\t\n\005min_z\030\001\032\t\n\005max_z\030\001\"\025\n\002T1\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\036\n\007Toutput\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\220\001\001\n\235\002\n\017QuantizedMatMul\022\007\n\001a\"\002T1\022\007\n\001b\"\002T2\022\t\n\005min_a\030\001\022\t\n\005max_a\030\001\022\t\n\005min_b\030\001\022\t\n\005max_b\030\001\032\016\n\003out\"\007Toutput\032\013\n\007min_out\030\001\032\013\n\007max_out\030\001\"\025\n\002T1\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\036\n\007Toutput\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\"\027\n\013transpose_a\022\004bool\032\002(\000\"\027\n\013transpose_b\022\004bool\032\002(\000\"\"\n\013Tactivation\022\004type\032\0020\014:\t\n\0072\005\013\014\r\017\020\n\301\001\n\014QuantizedMul\022\007\n\001x\"\002T1\022\007\n\001y\"\002T2\022\t\n\005min_x\030\001\022\t\n\005max_x\030\001\022\t\n\005min_y\030\001\022\t\n\005max_y\030\001\032\014\n\001z\"\007Toutput\032\t\n\005min_z\
030\001\032\t\n\005max_z\030\001\"\025\n\002T1\022\004type:\t\n\0072\005\013\014\r\017\020\"\025\n\002T2\022\004type:\t\n\0072\005\013\014\r\017\020\"\036\n\007Toutput\022\004type\032\0020\r:\t\n\0072\005\013\014\r\017\020\220\001\001\na\n\005Range\022\r\n\005start\"\004Tidx\022\r\n\005limit\"\004Tidx\022\r\n\005delta\"\004Tidx\032\016\n\006output\"\004Tidx\"\033\n\004Tidx\022\004type\032\0020\003:\t\n\0072\005\016\001\002\003\t\nS\n\004Real\022\n\n\005input\"\001T\032\016\n\006output\"\004Tout\"\025\n\001T\022\004type\032\0020\010:\006\n\0042\002\010\022\"\030\n\004Tout\022\004type\032\0020\001:\006\n\0042\002\001\002\n>\n\007RealDiv\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n5\n\nReciprocal\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n@\n\016ReciprocalGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n\177\n\023RequantizationRange\022\017\n\005input\"\006Tinput\022\r\n\tinput_min\030\001\022\r\n\tinput_max\030\001\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\n\333\001\n\nRequantize\022\017\n\005input\"\006Tinput\022\r\n\tinput_min\030\001\022\r\n\tinput_max\030\001\022\030\n\024requested_output_min\030\001\022\030\n\024requested_output_max\030\001\032\022\n\006output\"\010out_type\032\016\n\noutput_min\030\001\032\016\n\noutput_max\030\001\"\031\n\006Tinput\022\004type:\t\n\0072\005\013\014\r\017\020\"\033\n\010out_type\022\004type:\t\n\0072\005\013\014\r\017\020\n+\n\004Rint\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\023\n\001T\022\004type:\010\n\0062\004\016\023\001\002\n0\n\005Round\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n.\n\005Rsqrt\022\006\n\001x\
"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n;\n\tRsqrtGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\nt\n\nSegmentMax\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nz\n\013SegmentMean\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nt\n\nSegmentMin\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\nz\n\013SegmentProd\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\ny\n\nSegmentSum\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\032\013\n\006output\"\001T\" 
\n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\n?\n\006Select\022\r\n\tcondition\030\n\022\006\n\001t\"\001T\022\006\n\001e\"\001T\032\013\n\006output\"\001T\"\t\n\001T\022\004type\n0\n\007Sigmoid\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n=\n\013SigmoidGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n/\n\004Sign\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n,\n\003Sin\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n-\n\004Sinh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n\301\001\n\014SparseMatMul\022\007\n\001a\"\002Ta\022\007\n\001b\"\002Tb\032\013\n\007product\030\001\"\027\n\013transpose_a\022\004bool\032\002(\000\"\027\n\013transpose_b\022\004bool\032\002(\000\"\027\n\013a_is_sparse\022\004bool\032\002(\000\"\027\n\013b_is_sparse\022\004bool\032\002(\000\"\026\n\002Ta\022\004type\032\0020\001:\006\n\0042\002\001\016\"\026\n\002Tb\022\004type\032\0020\001:\006\n\0042\002\001\016\nz\n\021SparseSegmentMean\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\217\001\n\025SparseSegmentMeanGrad\022\t\n\004grad\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\017\n\013output_dim0\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\311\001\n 
SparseSegmentMeanWithNumSegments\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n{\n\022SparseSegmentSqrtN\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\220\001\n\026SparseSegmentSqrtNGrad\022\t\n\004grad\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\017\n\013output_dim0\030\003\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\312\001\n!SparseSegmentSqrtNWithNumSegments\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\203\001\n\020SparseSegmentSum\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n\322\001\n\037SparseSegmentSumWithNumSegments\022\t\n\004data\"\001T\022\017\n\007indices\"\004Tidx\022\017\n\013segment_ids\030\003\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\" 
\n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n-\n\004Sqrt\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n:\n\010SqrtGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n1\n\006Square\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\nG\n\021SquaredDifference\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\220\001\001\n:\n\003Sub\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n\214\001\n\003Sum\022\n\n\005input\"\001T\022\031\n\021reduction_indices\"\004Tidx\032\013\n\006output\"\001T\"\025\n\tkeep_dims\022\004bool\032\002(\000\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\004Tidx\022\004type\032\0020\003:\006\n\0042\002\003\t\n.\n\003Tan\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\027\n\001T\022\004type:\014\n\n2\010\016\023\001\002\003\t\010\022\n-\n\004Tanh\022\006\n\001x\"\001T\032\006\n\001y\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\n:\n\010TanhGrad\022\006\n\001y\"\001T\022\007\n\002dy\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\016\023\001\002\010\022\nB\n\013TruncateDiv\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\016\023\001\002\004\006\021\005\003\t\010\022\n<\n\013TruncateMod\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\025\n\001T\022\004type:\n\n\0102\006\003\t\016\023\001\002\n\274\001\n\022UnsortedSegmentMax\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\
033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\274\001\n\022UnsortedSegmentMin\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\"\033\n\001T\022\004type:\020\n\0162\014\001\002\003\004\005\006\t\016\021\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\302\001\n\023UnsortedSegmentProd\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n\301\001\n\022UnsortedSegmentSum\022\t\n\004data\"\001T\022\027\n\013segment_ids\"\010Tindices\022\034\n\014num_segments\"\014Tnumsegments\032\013\n\006output\"\001T\" \n\001T\022\004type:\025\n\0232\021\001\002\003\004\005\006\010\t\013\014\r\016\021\022\023\026\027\"\030\n\010Tindices\022\004type:\006\n\0042\002\003\t\" \n\014Tnumsegments\022\004type\032\0020\003:\006\n\0042\002\003\t\n5\n\005Xdivy\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\024\n\001T\022\004type:\t\n\0072\005\023\001\002\010\022\n5\n\005Xlogy\022\006\n\001x\"\001T\022\006\n\001y\"\001T\032\006\n\001z\"\001T\"\024\n\001T\022\004type:\t\n\0072\005\023\001\002\010\022\n1\n\004Zeta\022\006\n\001x\"\001T\022\006\n\001q\"\001T\032\006\n\001z\"\001T\"\021\n\001T\022\004type:\006\n\0042\002\001\002")